Posted to commits@drill.apache.org by gp...@apache.org on 2019/01/04 06:53:55 UTC

[drill] branch master updated (8a56fe6 -> 7a25d9d)

This is an automated email from the ASF dual-hosted git repository.

gparai pushed a change to branch master
in repository https://gitbox.apache.org/repos/asf/drill.git.


    from 8a56fe6  DRILL-6901: Move schema builder to src/main
     new d96bea5  DRILL-6936: TestGracefulShutdown.gracefulShutdownThreadShouldBeInitializedBeforeClosingDrillbit fails if loopback address is set in hosts closes #1589
     new 922beab  DRILL-6934: Update the option documentation for planner.enable_unnest_lateral closes #1587
     new 7108f16  DRILL-6931: File listing: fix issue for S3 directory objects and improve performance for recursive listing closes #1590
     new e7558b7  DRILL-6929: Exclude maprfs jar for default profile closes #1586
     new 8a85879  DRILL-6921: Add Clear button for /options filter
     new f687da8  DRILL-6907: Fix hive-exec-shaded classes recognition in IntelliJ IDEA closes #1575
     new a933136  DRILL-6894: CTAS and CTTAS are not working on S3 storage when cache is disabled
     new 10b1059  DRILL-6888: Move nested classes outside HashAggTemplate to allow for plain java compile option closes #1569
     new e65079a  DRILL-6879: Show warnings for potential performance issues
     new 7a25d9d  DRILL-540: Allow querying hive views in Drill

The 10 revisions listed above as "new" are entirely new to this
repository and will be described in separate emails.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.


Summary of changes:
 .../org/apache/drill/common/AutoCloseables.java    |  22 +
 contrib/format-maprdb/pom.xml                      |   3 +-
 contrib/storage-hive/core/pom.xml                  |  33 +-
 ...ertHiveMapRDBJsonScanToDrillMapRDBJsonScan.java |  24 +-
 .../planner/types/HiveToRelDataTypeConverter.java  | 158 ++++
 .../drill/exec/store/hive/ColumnListsCache.java    |  35 +-
 .../drill/exec/store/hive/HiveStoragePlugin.java   |  48 +-
 .../exec/store/hive/schema/DrillHiveTable.java     | 166 +---
 .../exec/store/hive/schema/DrillHiveViewTable.java |  89 ++-
 .../exec/store/hive/schema/HiveSchemaFactory.java  |  46 +-
 .../drill/exec/hive/TestHiveViewsSupport.java      | 233 ++++++
 .../exec/hive/TestInfoSchemaOnHiveStorage.java     |  14 +-
 .../hive/BaseTestHiveImpersonation.java            |  44 +-
 .../hive/TestSqlStdBasedAuthorization.java         | 252 +++++--
 .../hive/TestStorageBasedHiveAuthorization.java    | 839 ++++++++++++++-------
 .../exec/sql/hive/TestViewSupportOnHiveTables.java |   5 +-
 .../exec/store/hive/HiveTestDataGenerator.java     |  14 +-
 contrib/storage-hive/hive-exec-shade/pom.xml       |  35 +
 contrib/storage-jdbc/pom.xml                       |   1 -
 exec/java-exec/pom.xml                             |   1 -
 .../java/org/apache/drill/exec/ExecConstants.java  |  13 +
 .../impl/aggregate/HashAggSpilledPartition.java    |  51 ++
 .../physical/impl/aggregate/HashAggTemplate.java   |  58 +-
 .../physical/impl/aggregate/HashAggUpdater.java}   |  32 +-
 .../drill/exec/planner/logical/DrillViewTable.java |  14 +-
 .../exec/planner/physical/PlannerSettings.java     |   2 +-
 .../planner/sql/handlers/ShowFilesHandler.java     |   8 +-
 .../drill/exec/server/rest/StatusResources.java    |  55 +-
 .../exec/server/rest/profile/FragmentWrapper.java  |  39 +-
 .../exec/server/rest/profile/HtmlAttribute.java    |  37 +
 .../exec/server/rest/profile/OperatorWrapper.java  |  94 ++-
 .../exec/server/rest/profile/ProfileResources.java |   4 +-
 .../exec/server/rest/profile/ProfileWrapper.java   |  16 +-
 .../exec/server/rest/profile/TableBuilder.java     | 140 ++--
 .../exec/store/easy/json/JSONFormatPlugin.java     |   9 +-
 .../exec/store/easy/json/JsonRecordWriter.java     |   9 +-
 .../exec/store/easy/text/TextFormatPlugin.java     |   9 +-
 .../store/ischema/InfoSchemaRecordGenerator.java   |   3 +-
 .../exec/store/parquet/ParquetFormatPlugin.java    |   3 -
 .../exec/store/parquet/ParquetRecordWriter.java    |   3 +-
 .../exec/store/text/DrillTextRecordWriter.java     |   9 +-
 .../org/apache/drill/exec/util/FileSystemUtil.java | 230 +++---
 .../java-exec/src/main/resources/drill-module.conf |  14 +-
 exec/java-exec/src/main/resources/rest/options.ftl |  50 +-
 .../src/main/resources/rest/profile/profile.ftl    | 113 ++-
 .../src/main/resources/rest/static/img/turtle.png  | Bin 0 -> 469 bytes
 .../apache/drill/test/TestGracefulShutdown.java    |  46 +-
 logical/pom.xml                                    |   3 -
 pom.xml                                            |   5 +
 49 files changed, 2144 insertions(+), 987 deletions(-)
 rename contrib/storage-hive/core/{src => scrMapr}/main/java/org/apache/drill/exec/planner/sql/logical/ConvertHiveMapRDBJsonScanToDrillMapRDBJsonScan.java (90%)
 create mode 100644 contrib/storage-hive/core/src/main/java/org/apache/drill/exec/planner/types/HiveToRelDataTypeConverter.java
 create mode 100644 contrib/storage-hive/core/src/test/java/org/apache/drill/exec/hive/TestHiveViewsSupport.java
 create mode 100644 exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/aggregate/HashAggSpilledPartition.java
 copy exec/{vector/src/main/java/org/apache/drill/exec/vector/complex/impl/UntypedReaderImpl.java => java-exec/src/main/java/org/apache/drill/exec/physical/impl/aggregate/HashAggUpdater.java} (61%)
 create mode 100644 exec/java-exec/src/main/java/org/apache/drill/exec/server/rest/profile/HtmlAttribute.java
 create mode 100644 exec/java-exec/src/main/resources/rest/static/img/turtle.png


[drill] 04/10: DRILL-6929: Exclude maprfs jar for default profile closes #1586

Posted by gp...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

gparai pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/drill.git

commit e7558b7909e855d36d5664b93e9b565f8cedca19
Author: Volodymyr Vysotskyi <vv...@gmail.com>
AuthorDate: Wed Dec 26 20:31:58 2018 +0200

    DRILL-6929: Exclude maprfs jar for default profile
    closes #1586
---
 contrib/format-maprdb/pom.xml                      |  3 +-
 contrib/storage-hive/core/pom.xml                  | 33 +++++++++++++++++----
 ...ertHiveMapRDBJsonScanToDrillMapRDBJsonScan.java | 24 +++++----------
 .../drill/exec/store/hive/HiveStoragePlugin.java   | 34 +++++++++++++++-------
 contrib/storage-jdbc/pom.xml                       |  1 -
 exec/java-exec/pom.xml                             |  1 -
 pom.xml                                            |  5 ++++
 7 files changed, 66 insertions(+), 35 deletions(-)

diff --git a/contrib/format-maprdb/pom.xml b/contrib/format-maprdb/pom.xml
index 5f238e1..0a09c3d 100644
--- a/contrib/format-maprdb/pom.xml
+++ b/contrib/format-maprdb/pom.xml
@@ -100,7 +100,6 @@
       <plugin>
         <groupId>org.codehaus.mojo</groupId>
         <artifactId>build-helper-maven-plugin</artifactId>
-        <version>1.9.1</version>
         <executions>
           <execution>
             <id>add-sources-as-resources</id>
@@ -187,12 +186,14 @@
       <artifactId>maprdb</artifactId>
       <version>${mapr.release.version}</version>
       <classifier>tests</classifier>
+      <scope>test</scope>
     </dependency>
     <dependency>
       <groupId>com.mapr.hadoop</groupId>
       <artifactId>maprfs</artifactId>
       <version>${mapr.release.version}</version>
       <classifier>tests</classifier>
+      <scope>test</scope>
     </dependency>
     <dependency>
       <groupId>com.jcraft</groupId>
diff --git a/contrib/storage-hive/core/pom.xml b/contrib/storage-hive/core/pom.xml
index edb6a50..0203efb 100644
--- a/contrib/storage-hive/core/pom.xml
+++ b/contrib/storage-hive/core/pom.xml
@@ -142,11 +142,6 @@
         </exclusion>
       </exclusions>
     </dependency>
-    <dependency>
-      <groupId>org.apache.drill.contrib</groupId>
-      <artifactId>drill-format-mapr</artifactId>
-      <version>${project.version}</version>
-    </dependency>
   </dependencies>
 
   <build>
@@ -175,15 +170,41 @@
   <profiles>
     <profile>
       <id>mapr</id>
+      <build>
+        <plugins>
+          <plugin>
+            <groupId>org.codehaus.mojo</groupId>
+            <artifactId>build-helper-maven-plugin</artifactId>
+            <executions>
+              <execution>
+                <id>add-mapr-sources</id>
+                <phase>generate-sources</phase>
+                <goals>
+                  <goal>add-source</goal>
+                </goals>
+                <configuration>
+                  <sources>
+                    <source>scrMapr/main/java</source>
+                  </sources>
+                </configuration>
+              </execution>
+            </executions>
+          </plugin>
+        </plugins>
+      </build>
       <dependencies>
         <dependency>
+          <groupId>org.apache.drill.contrib</groupId>
+          <artifactId>drill-format-mapr</artifactId>
+          <version>${project.version}</version>
+        </dependency>
+        <dependency>
           <groupId>com.tdunning</groupId>
           <artifactId>json</artifactId>
         </dependency>
         <dependency>
           <groupId>org.apache.hive</groupId>
           <artifactId>hive-maprdb-json-handler</artifactId>
-          <scope>runtime</scope>
         </dependency>
         <dependency>
           <groupId>com.mapr.db</groupId>
diff --git a/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/planner/sql/logical/ConvertHiveMapRDBJsonScanToDrillMapRDBJsonScan.java b/contrib/storage-hive/core/scrMapr/main/java/org/apache/drill/exec/planner/sql/logical/ConvertHiveMapRDBJsonScanToDrillMapRDBJsonScan.java
similarity index 90%
rename from contrib/storage-hive/core/src/main/java/org/apache/drill/exec/planner/sql/logical/ConvertHiveMapRDBJsonScanToDrillMapRDBJsonScan.java
rename to contrib/storage-hive/core/scrMapr/main/java/org/apache/drill/exec/planner/sql/logical/ConvertHiveMapRDBJsonScanToDrillMapRDBJsonScan.java
index 4994a72..b8c2675 100644
--- a/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/planner/sql/logical/ConvertHiveMapRDBJsonScanToDrillMapRDBJsonScan.java
+++ b/contrib/storage-hive/core/scrMapr/main/java/org/apache/drill/exec/planner/sql/logical/ConvertHiveMapRDBJsonScanToDrillMapRDBJsonScan.java
@@ -21,27 +21,26 @@ import org.apache.calcite.plan.RelOptRuleCall;
 import org.apache.calcite.rel.type.RelDataType;
 import org.apache.calcite.rel.type.RelDataTypeFactory;
 import org.apache.calcite.rel.type.RelDataTypeField;
-import org.apache.drill.common.exceptions.UserException;
+import org.apache.drill.common.exceptions.DrillRuntimeException;
 import org.apache.drill.common.expression.SchemaPath;
-import org.apache.drill.exec.ExecConstants;
 import org.apache.drill.exec.planner.logical.DrillScanRel;
 import org.apache.drill.exec.planner.logical.RelOptHelper;
 import org.apache.drill.exec.store.StoragePluginOptimizerRule;
 import org.apache.drill.exec.store.hive.HiveMetadataProvider;
 import org.apache.drill.exec.store.hive.HiveReadEntry;
 import org.apache.drill.exec.store.hive.HiveScan;
+import org.apache.drill.exec.store.hive.HiveUtilities;
 import org.apache.drill.exec.store.mapr.db.MapRDBFormatPlugin;
 import org.apache.drill.exec.store.mapr.db.MapRDBFormatPluginConfig;
 import org.apache.drill.exec.store.mapr.db.json.JsonScanSpec;
 import org.apache.drill.exec.store.mapr.db.json.JsonTableGroupScan;
+import org.apache.hadoop.hive.maprdb.json.input.HiveMapRDBJsonInputFormat;
 import org.ojai.DocumentConstants;
 
 import java.util.List;
 import java.util.Map;
 import java.util.stream.Collectors;
 
-import static org.apache.drill.exec.store.hive.HiveUtilities.nativeReadersRuleMatches;
-
 /**
  * Convert Hive scan to use Drill's native MapR-DB reader instead of Hive's MapR-DB JSON Handler.
  */
@@ -69,15 +68,7 @@ public class ConvertHiveMapRDBJsonScanToDrillMapRDBJsonScan extends StoragePlugi
    */
   @Override
   public boolean matches(RelOptRuleCall call) {
-    try {
-      return nativeReadersRuleMatches(call,
-          Class.forName("org.apache.hadoop.hive.maprdb.json.input.HiveMapRDBJsonInputFormat"));
-    } catch (ClassNotFoundException e) {
-      throw UserException.resourceError(e)
-          .message("Current Drill build is not designed for working with Hive MapR-DB tables. " +
-              "Please disable \"%s\" option", ExecConstants.HIVE_OPTIMIZE_MAPRDB_JSON_SCAN_WITH_NATIVE_READER)
-          .build(logger);
-    }
+    return HiveUtilities.nativeReadersRuleMatches(call, HiveMapRDBJsonInputFormat.class);
   }
 
   @Override
@@ -110,15 +101,16 @@ public class ConvertHiveMapRDBJsonScanToDrillMapRDBJsonScan extends StoragePlugi
         To ensure Drill MapR-DB Json scan will be chosen, reduce Hive scan importance to 0.
        */
       call.getPlanner().setImportance(hiveScanRel, 0.0);
-    } catch (final Exception e) {
-      logger.warn("Failed to convert HiveScan to JsonScanSpec", e);
+    } catch (DrillRuntimeException e) {
+      // TODO: Improve error handling once StoragePlugin.getFormatPlugin() is allowed to throw IOException
+      logger.warn("Failed to convert HiveScan to JsonScanSpec. Falling back to the Hive MapR-DB connector.", e);
     }
   }
 
   /**
    * Helper method which creates a DrillScanRel with native Drill HiveScan.
    */
-  private DrillScanRel createNativeScanRel(final DrillScanRel hiveScanRel) throws Exception {
+  private DrillScanRel createNativeScanRel(final DrillScanRel hiveScanRel) {
     RelDataTypeFactory typeFactory = hiveScanRel.getCluster().getTypeFactory();
     HiveScan hiveScan = (HiveScan) hiveScanRel.getGroupScan();
     Map<String, String> parameters = hiveScan.getHiveReadEntry().getHiveTableWrapper().getParameters();
diff --git a/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/store/hive/HiveStoragePlugin.java b/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/store/hive/HiveStoragePlugin.java
index a65a69e..a8c789d 100644
--- a/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/store/hive/HiveStoragePlugin.java
+++ b/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/store/hive/HiveStoragePlugin.java
@@ -27,6 +27,7 @@ import java.util.Set;
 import java.util.function.Function;
 import java.util.stream.Collectors;
 
+import org.apache.drill.common.logical.StoragePluginConfig;
 import org.apache.drill.shaded.guava.com.google.common.collect.ImmutableSet;
 
 import org.apache.calcite.schema.Schema.TableType;
@@ -41,7 +42,6 @@ import org.apache.drill.common.logical.FormatPluginConfig;
 import org.apache.drill.exec.ExecConstants;
 import org.apache.drill.exec.ops.OptimizerRulesContext;
 import org.apache.drill.exec.physical.base.AbstractGroupScan;
-import org.apache.drill.exec.planner.sql.logical.ConvertHiveMapRDBJsonScanToDrillMapRDBJsonScan;
 import org.apache.drill.exec.planner.sql.logical.ConvertHiveParquetScanToDrillParquetScan;
 import org.apache.drill.exec.planner.sql.logical.HivePushPartitionFilterIntoScan;
 import org.apache.drill.exec.server.DrillbitContext;
@@ -55,8 +55,7 @@ import org.apache.drill.exec.store.hive.schema.HiveSchemaFactory;
 
 import com.fasterxml.jackson.core.type.TypeReference;
 import com.fasterxml.jackson.databind.ObjectMapper;
-import org.apache.drill.exec.store.mapr.db.MapRDBFormatPlugin;
-import org.apache.drill.exec.store.mapr.db.MapRDBFormatPluginConfig;
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
 import org.apache.hadoop.hive.metastore.api.MetaException;
@@ -217,7 +216,14 @@ public class HiveStoragePlugin extends AbstractStoragePlugin {
       ruleBuilder.add(ConvertHiveParquetScanToDrillParquetScan.INSTANCE);
     }
     if (options.getBoolean(ExecConstants.HIVE_OPTIMIZE_MAPRDB_JSON_SCAN_WITH_NATIVE_READER)) {
-      ruleBuilder.add(ConvertHiveMapRDBJsonScanToDrillMapRDBJsonScan.INSTANCE);
+      try {
+        Class<?> hiveToDrillMapRDBJsonRuleClass =
+            Class.forName("org.apache.drill.exec.planner.sql.logical.ConvertHiveMapRDBJsonScanToDrillMapRDBJsonScan");
+        ruleBuilder.add((StoragePluginOptimizerRule) hiveToDrillMapRDBJsonRuleClass.getField("INSTANCE").get(null));
+      } catch (ReflectiveOperationException e) {
+        logger.warn("Current Drill build is not designed for working with Hive MapR-DB tables. " +
+            "Please disable {} option", ExecConstants.HIVE_OPTIMIZE_MAPRDB_JSON_SCAN_WITH_NATIVE_READER);
+      }
     }
     return ruleBuilder.build();
   }
@@ -225,13 +231,21 @@ public class HiveStoragePlugin extends AbstractStoragePlugin {
   @Override
   public FormatPlugin getFormatPlugin(FormatPluginConfig formatConfig) {
     //  TODO: implement formatCreator similar to FileSystemPlugin formatCreator. DRILL-6621
-    if (formatConfig instanceof MapRDBFormatPluginConfig) {
-      try {
-        return new MapRDBFormatPlugin(HIVE_MAPRDB_FORMAT_PLUGIN_NAME, context, hiveConf, config,
-            (MapRDBFormatPluginConfig) formatConfig);
-      } catch (IOException e) {
-        throw new DrillRuntimeException("The error is occurred while connecting to MapR-DB", e);
+    try {
+      Class<?> mapRDBFormatPluginConfigClass =
+          Class.forName("org.apache.drill.exec.store.mapr.db.MapRDBFormatPluginConfig");
+      Class<?> mapRDBFormatPluginClass =
+          Class.forName("org.apache.drill.exec.store.mapr.db.MapRDBFormatPlugin");
+
+      if (mapRDBFormatPluginConfigClass.isInstance(formatConfig)) {
+        return (FormatPlugin) mapRDBFormatPluginClass.getConstructor(
+              new Class[]{String.class, DrillbitContext.class, Configuration.class,
+                  StoragePluginConfig.class, mapRDBFormatPluginConfigClass})
+          .newInstance(
+              new Object[]{HIVE_MAPRDB_FORMAT_PLUGIN_NAME, context, hiveConf, config, formatConfig});
       }
+    } catch (ReflectiveOperationException e) {
+      throw new DrillRuntimeException("An error occurred while connecting to MapR-DB or instantiating MapRDBFormatPlugin", e);
     }
     throw new DrillRuntimeException(String.format("Hive storage plugin doesn't support usage of %s format plugin",
         formatConfig.getClass().getName()));
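
The hunk above replaces the compile-time references to the MapR-DB classes with reflection, so the default Maven profile builds and runs without the maprfs jar on the classpath. A minimal, self-contained sketch of that optional-dependency pattern follows (illustrative only; the loader class and method names are hypothetical, while the rule class name is taken from the diff):

import java.lang.reflect.Field;

public class OptionalRuleLoader {

  // Rule class name taken from the diff above; resolved only at runtime.
  private static final String MAPRDB_RULE_CLASS =
      "org.apache.drill.exec.planner.sql.logical.ConvertHiveMapRDBJsonScanToDrillMapRDBJsonScan";

  // Returns the rule's static INSTANCE field, or null when the MapR classes
  // are not on the classpath (default profile built without the mapr jars).
  public static Object loadMapRDBRuleOrNull() {
    try {
      Class<?> ruleClass = Class.forName(MAPRDB_RULE_CLASS);
      Field instanceField = ruleClass.getField("INSTANCE");
      return instanceField.get(null);
    } catch (ReflectiveOperationException e) {
      return null;
    }
  }
}
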
diff --git a/contrib/storage-jdbc/pom.xml b/contrib/storage-jdbc/pom.xml
index eca206a..efaf5c1 100755
--- a/contrib/storage-jdbc/pom.xml
+++ b/contrib/storage-jdbc/pom.xml
@@ -153,7 +153,6 @@
         <!-- Allows us to reserve ports for external servers that we will launch  -->
         <groupId>org.codehaus.mojo</groupId>
         <artifactId>build-helper-maven-plugin</artifactId>
-        <version>3.0.0</version>
         <executions>
           <execution>
             <id>reserve-network-port</id>
diff --git a/exec/java-exec/pom.xml b/exec/java-exec/pom.xml
index 6e6a9d8..eff336c 100644
--- a/exec/java-exec/pom.xml
+++ b/exec/java-exec/pom.xml
@@ -752,7 +752,6 @@
       <plugin> <!-- source file must end up in the jar for janino parsing -->
         <groupId>org.codehaus.mojo</groupId>
         <artifactId>build-helper-maven-plugin</artifactId>
-        <version>1.9.1</version>
         <executions>
           <execution>
             <id>add-sources-as-resources</id>
diff --git a/pom.xml b/pom.xml
index a6466a4..f0ecf80 100644
--- a/pom.xml
+++ b/pom.xml
@@ -711,6 +711,11 @@
           <artifactId>maven-enforcer-plugin</artifactId>
           <version>3.0.0-M2</version>
         </plugin>
+        <plugin>
+          <groupId>org.codehaus.mojo</groupId>
+          <artifactId>build-helper-maven-plugin</artifactId>
+          <version>3.0.0</version>
+        </plugin>
         <plugin> <!-- classpath scanning  -->
           <groupId>org.codehaus.mojo</groupId>
           <artifactId>exec-maven-plugin</artifactId>


[drill] 10/10: DRILL-540: Allow querying hive views in Drill

Posted by gp...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

gparai pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/drill.git

commit 7a25d9d86b9324dc4c3121a51e3d964bdb7b0284
Author: Igor Guzenko <ih...@gmail.com>
AuthorDate: Fri Oct 12 12:02:43 2018 +0300

    DRILL-540: Allow querying hive views in Drill
    
    1. Added DrillHiveViewTable which allows construction of DrillViewTable based
       on Hive metadata
    2. Added initialization of DrillHiveViewTable in HiveSchemaFactory
    3. Extracted conversion of Hive data types from DrillHiveTable
       to HiveToRelDataTypeConverter
    4. Removed throwing of UnsupportedOperationException from HiveStoragePlugin
    5. Added TestHiveViewsSupport and authorization tests
    6. Added closeSilently() method to AutoCloseables
    closes #1559
---
 .../org/apache/drill/common/AutoCloseables.java    |  22 +
 .../planner/types/HiveToRelDataTypeConverter.java  | 158 ++++
 .../drill/exec/store/hive/ColumnListsCache.java    |  35 +-
 .../drill/exec/store/hive/HiveStoragePlugin.java   |  16 +-
 .../exec/store/hive/schema/DrillHiveTable.java     | 166 +---
 .../exec/store/hive/schema/DrillHiveViewTable.java |  89 ++-
 .../exec/store/hive/schema/HiveSchemaFactory.java  |  46 +-
 .../drill/exec/hive/TestHiveViewsSupport.java      | 233 ++++++
 .../exec/hive/TestInfoSchemaOnHiveStorage.java     |  14 +-
 .../hive/BaseTestHiveImpersonation.java            |  44 +-
 .../hive/TestSqlStdBasedAuthorization.java         | 252 +++++--
 .../hive/TestStorageBasedHiveAuthorization.java    | 839 ++++++++++++++-------
 .../exec/sql/hive/TestViewSupportOnHiveTables.java |   5 +-
 .../exec/store/hive/HiveTestDataGenerator.java     |  14 +-
 .../drill/exec/planner/logical/DrillViewTable.java |  14 +-
 logical/pom.xml                                    |   3 -
 16 files changed, 1401 insertions(+), 549 deletions(-)
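
With this change, Hive views can be queried from Drill like ordinary tables. As an illustration only (not part of the commit), a minimal JDBC sketch, assuming a Drillbit running on localhost with the hive storage plugin enabled and a view named hive_view such as the one used in the new TestHiveViewsSupport tests:

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;

public class QueryHiveViewExample {
  public static void main(String[] args) throws Exception {
    // The connection URL is an assumption; use your own Drillbit host or ZooKeeper quorum.
    try (Connection conn = DriverManager.getConnection("jdbc:drill:drillbit=localhost");
         Statement stmt = conn.createStatement();
         ResultSet rs = stmt.executeQuery("SELECT * FROM hive.hive_view LIMIT 5")) {
      while (rs.next()) {
        System.out.println(rs.getInt("key") + " -> " + rs.getString("value"));
      }
    }
  }
}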

diff --git a/common/src/main/java/org/apache/drill/common/AutoCloseables.java b/common/src/main/java/org/apache/drill/common/AutoCloseables.java
index 8ca715e..4cdbded 100644
--- a/common/src/main/java/org/apache/drill/common/AutoCloseables.java
+++ b/common/src/main/java/org/apache/drill/common/AutoCloseables.java
@@ -19,12 +19,18 @@ package org.apache.drill.common;
 
 import java.util.Arrays;
 import java.util.Collection;
+import java.util.Objects;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Utilities for AutoCloseable classes.
  */
 public class AutoCloseables {
 
+  private static final Logger LOGGER = LoggerFactory.getLogger(AutoCloseables.class);
+
   public interface Closeable extends AutoCloseable {
     @Override
     void close();
@@ -92,4 +98,20 @@ public class AutoCloseables {
       throw topLevelException;
     }
   }
+
+  /**
+   * Close all without caring about thrown exceptions
+   * @param closeables - array containing auto closeables
+   */
+  public static void closeSilently(AutoCloseable... closeables) {
+    Arrays.stream(closeables).filter(Objects::nonNull)
+        .forEach(target -> {
+          try {
+            target.close();
+          } catch (Exception e) {
+            LOGGER.warn(String.format("Exception was thrown while closing auto closeable: %s", target), e);
+          }
+        });
+  }
+
 }
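
A small usage sketch (illustrative only) of the new closeSilently() helper; the streams here are hypothetical stand-ins for the metastore resources it closes later in this commit:

import java.io.ByteArrayInputStream;
import java.io.InputStream;

import org.apache.drill.common.AutoCloseables;

public class CloseSilentlyExample {
  public static void main(String[] args) {
    InputStream first = new ByteArrayInputStream(new byte[] {1, 2, 3});
    InputStream second = null;  // null entries are filtered out rather than dereferenced

    // Closes every non-null argument; any exception is logged as a warning and swallowed,
    // which is what the reworked HiveSchemaFactory.close() below relies on.
    AutoCloseables.closeSilently(first, second);
  }
}
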
diff --git a/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/planner/types/HiveToRelDataTypeConverter.java b/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/planner/types/HiveToRelDataTypeConverter.java
new file mode 100644
index 0000000..3b3abf2
--- /dev/null
+++ b/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/planner/types/HiveToRelDataTypeConverter.java
@@ -0,0 +1,158 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.drill.exec.planner.types;
+
+import java.util.List;
+import java.util.stream.Collectors;
+
+import org.apache.calcite.rel.type.RelDataType;
+import org.apache.calcite.rel.type.RelDataTypeFactory;
+import org.apache.calcite.sql.SqlCollation;
+import org.apache.calcite.sql.type.SqlTypeName;
+import org.apache.calcite.util.Util;
+import org.apache.hadoop.hive.metastore.api.FieldSchema;
+import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector.Category;
+import org.apache.hadoop.hive.serde2.objectinspector.PrimitiveObjectInspector;
+import org.apache.hadoop.hive.serde2.typeinfo.DecimalTypeInfo;
+import org.apache.hadoop.hive.serde2.typeinfo.ListTypeInfo;
+import org.apache.hadoop.hive.serde2.typeinfo.MapTypeInfo;
+import org.apache.hadoop.hive.serde2.typeinfo.PrimitiveTypeInfo;
+import org.apache.hadoop.hive.serde2.typeinfo.StructTypeInfo;
+import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
+import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * This class is responsible for data type conversions
+ * from {@link org.apache.hadoop.hive.metastore.api.FieldSchema} instances
+ * to {@link  org.apache.calcite.rel.type.RelDataType} instances
+ */
+public class HiveToRelDataTypeConverter {
+
+  private static final Logger logger = LoggerFactory.getLogger(HiveToRelDataTypeConverter.class);
+
+  private static final String UNSUPPORTED_HIVE_DATA_TYPE_ERROR_MSG = "Unsupported Hive data type %s. %n" +
+      "Following Hive data types are supported in Drill INFORMATION_SCHEMA: " +
+      "BOOLEAN, BYTE, SHORT, INT, LONG, FLOAT, DOUBLE, DATE, TIMESTAMP, BINARY, DECIMAL, STRING, " +
+      "VARCHAR, CHAR, LIST, MAP, STRUCT and UNION";
+
+
+  private final RelDataTypeFactory typeFactory;
+
+  public HiveToRelDataTypeConverter(RelDataTypeFactory typeFactory) {
+    this.typeFactory = typeFactory;
+  }
+
+  /**
+   * Performs conversion from Hive field to nullable RelDataType
+   *
+   * @param field - representation of data type in Hive Metastore
+   * @return appropriate nullable RelDataType for using with Calcite
+   * @throws RuntimeException for unsupported data types, check
+   *         {@link HiveToRelDataTypeConverter#UNSUPPORTED_HIVE_DATA_TYPE_ERROR_MSG}
+   *         for details about supported hive types
+   */
+  public RelDataType convertToNullableRelDataType(FieldSchema field) {
+    TypeInfo fieldTypeInfo = TypeInfoUtils.getTypeInfoFromTypeString(field.getType());
+    RelDataType relDataType = convertToRelDataType(fieldTypeInfo);
+    return typeFactory.createTypeWithNullability(relDataType, true);
+  }
+
+  private RelDataType convertToRelDataType(TypeInfo typeInfo) {
+    final Category typeCategory = typeInfo.getCategory();
+    switch (typeCategory) {
+      case PRIMITIVE:
+        return getRelDataType((PrimitiveTypeInfo) typeInfo);
+      case LIST:
+        return getRelDataType((ListTypeInfo) typeInfo);
+      case MAP:
+        return getRelDataType((MapTypeInfo) typeInfo);
+      case STRUCT:
+        return getRelDataType((StructTypeInfo) typeInfo);
+      case UNION:
+        logger.warn("There is no UNION data type in SQL. Converting it to Sql type OTHER to avoid " +
+            "breaking INFORMATION_SCHEMA queries");
+        return typeFactory.createSqlType(SqlTypeName.OTHER);
+    }
+    throw new RuntimeException(String.format(UNSUPPORTED_HIVE_DATA_TYPE_ERROR_MSG, typeCategory));
+  }
+
+  private RelDataType getRelDataType(StructTypeInfo structTypeInfo) {
+    final List<String> fieldNames = structTypeInfo.getAllStructFieldNames();
+    final List<RelDataType> relDataTypes = structTypeInfo.getAllStructFieldTypeInfos().stream()
+        .map(this::convertToRelDataType)
+        .collect(Collectors.toList());
+    return typeFactory.createStructType(relDataTypes, fieldNames);
+  }
+
+  private RelDataType getRelDataType(MapTypeInfo mapTypeInfo) {
+    RelDataType keyType = convertToRelDataType(mapTypeInfo.getMapKeyTypeInfo());
+    RelDataType valueType = convertToRelDataType(mapTypeInfo.getMapValueTypeInfo());
+    return typeFactory.createMapType(keyType, valueType);
+  }
+
+  private RelDataType getRelDataType(ListTypeInfo listTypeInfo) {
+    RelDataType listElemTypeInfo = convertToRelDataType(listTypeInfo.getListElementTypeInfo());
+    return typeFactory.createArrayType(listElemTypeInfo, -1);
+  }
+
+  private RelDataType getRelDataType(PrimitiveTypeInfo primitiveTypeInfo) {
+    final PrimitiveObjectInspector.PrimitiveCategory primitiveCategory = primitiveTypeInfo.getPrimitiveCategory();
+    switch (primitiveCategory) {
+      case STRING:
+      case VARCHAR:
+        return getRelDataType(primitiveTypeInfo, SqlTypeName.VARCHAR);
+      case CHAR:
+        return getRelDataType(primitiveTypeInfo, SqlTypeName.CHAR);
+      case BYTE:
+      case SHORT:
+      case INT:
+        return typeFactory.createSqlType(SqlTypeName.INTEGER);
+      case DECIMAL:
+        return getRelDataType((DecimalTypeInfo) primitiveTypeInfo);
+      case BOOLEAN:
+        return typeFactory.createSqlType(SqlTypeName.BOOLEAN);
+      case LONG:
+        return typeFactory.createSqlType(SqlTypeName.BIGINT);
+      case FLOAT:
+        return typeFactory.createSqlType(SqlTypeName.FLOAT);
+      case DOUBLE:
+        return typeFactory.createSqlType(SqlTypeName.DOUBLE);
+      case DATE:
+        return typeFactory.createSqlType(SqlTypeName.DATE);
+      case TIMESTAMP:
+        return typeFactory.createSqlType(SqlTypeName.TIMESTAMP);
+      case BINARY:
+        return typeFactory.createSqlType(SqlTypeName.VARBINARY);
+    }
+    throw new RuntimeException(String.format(UNSUPPORTED_HIVE_DATA_TYPE_ERROR_MSG, primitiveCategory));
+  }
+
+  private RelDataType getRelDataType(PrimitiveTypeInfo pTypeInfo, SqlTypeName typeName) {
+    int maxLen = TypeInfoUtils.getCharacterLengthForType(pTypeInfo);
+    RelDataType relDataType = typeFactory.createSqlType(typeName, maxLen);
+    return typeFactory.createTypeWithCharsetAndCollation(relDataType, Util.getDefaultCharset(),
+        SqlCollation.IMPLICIT);
+  }
+
+  private RelDataType getRelDataType(DecimalTypeInfo decimalTypeInfo) {
+    return typeFactory.createSqlType(SqlTypeName.DECIMAL, decimalTypeInfo.precision(), decimalTypeInfo.scale());
+  }
+
+}
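
A usage sketch (illustrative only) of the extracted converter; the column definition is hypothetical, while the type factory setup mirrors what DrillHiveViewTable wires up later in this commit:

import org.apache.calcite.rel.type.RelDataType;
import org.apache.calcite.sql.type.SqlTypeFactoryImpl;
import org.apache.drill.exec.planner.types.DrillRelDataTypeSystem;
import org.apache.drill.exec.planner.types.HiveToRelDataTypeConverter;
import org.apache.hadoop.hive.metastore.api.FieldSchema;

public class HiveTypeConversionExample {
  public static void main(String[] args) {
    HiveToRelDataTypeConverter converter = new HiveToRelDataTypeConverter(
        new SqlTypeFactoryImpl(DrillRelDataTypeSystem.DRILL_REL_DATATYPE_SYSTEM));

    // A Hive column as stored in the metastore: name, type string, comment.
    FieldSchema hiveColumn = new FieldSchema("price", "decimal(10,2)", null);

    // Produces a nullable Calcite DECIMAL(10, 2) type.
    RelDataType relType = converter.convertToNullableRelDataType(hiveColumn);
    System.out.println(relType.getFullTypeString());
  }
}
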
diff --git a/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/store/hive/ColumnListsCache.java b/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/store/hive/ColumnListsCache.java
index 4420155..6a5e1c6 100644
--- a/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/store/hive/ColumnListsCache.java
+++ b/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/store/hive/ColumnListsCache.java
@@ -17,15 +17,16 @@
  */
 package org.apache.drill.exec.store.hive;
 
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import org.apache.drill.shaded.guava.com.google.common.base.Preconditions;
 import org.apache.drill.shaded.guava.com.google.common.collect.ImmutableList;
-import org.apache.drill.shaded.guava.com.google.common.collect.Lists;
-import org.apache.drill.shaded.guava.com.google.common.collect.Maps;
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
 import org.apache.hadoop.hive.metastore.api.Table;
 
-import java.util.List;
-import java.util.Map;
-
 /**
  * The class represents "cache" for partition and table columns.
  * Used to reduce physical plan for Hive tables.
@@ -46,8 +47,8 @@ public class ColumnListsCache {
   }
 
   public ColumnListsCache() {
-    this.fields = Lists.newArrayList();
-    this.keys = Maps.newHashMap();
+    this.fields = new ArrayList<>();
+    this.keys = new HashMap<>();
   }
 
   /**
@@ -83,14 +84,22 @@ public class ColumnListsCache {
    * or null if index is negative or greater than fields list size
    */
   public List<FieldSchema> getColumns(int index) {
-    if (index >= 0 && index < fields.size()) {
-      return fields.get(index);
-    } else {
-      return null;
-    }
+   return (index > -1 && index < fields.size()) ? fields.get(index) : null;
+  }
+
+  /**
+   * Safely retrieves Hive table columns from cache.
+   *
+   * @return list of table columns defined in hive
+   */
+  public List<FieldSchema> getTableSchemaColumns() {
+    List<FieldSchema> tableSchemaColumns = getColumns(0);
+    Preconditions.checkNotNull(tableSchemaColumns, "Failed to get columns for Hive table from cache.");
+    return tableSchemaColumns;
   }
 
   public List<List<FieldSchema>> getFields() {
-    return Lists.newArrayList(fields);
+    return new ArrayList<>(fields);
   }
+
 }
diff --git a/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/store/hive/HiveStoragePlugin.java b/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/store/hive/HiveStoragePlugin.java
index a8c789d..9cd556d 100644
--- a/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/store/hive/HiveStoragePlugin.java
+++ b/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/store/hive/HiveStoragePlugin.java
@@ -27,18 +27,18 @@ import java.util.Set;
 import java.util.function.Function;
 import java.util.stream.Collectors;
 
-import org.apache.drill.common.logical.StoragePluginConfig;
-import org.apache.drill.shaded.guava.com.google.common.collect.ImmutableSet;
+import com.fasterxml.jackson.core.type.TypeReference;
+import com.fasterxml.jackson.databind.ObjectMapper;
 
-import org.apache.calcite.schema.Schema.TableType;
 import org.apache.calcite.schema.SchemaPlus;
-
 import org.apache.commons.lang3.StringEscapeUtils;
+
 import org.apache.drill.common.JSONOptions;
 import org.apache.drill.common.exceptions.DrillRuntimeException;
 import org.apache.drill.common.exceptions.ExecutionSetupException;
 import org.apache.drill.common.expression.SchemaPath;
 import org.apache.drill.common.logical.FormatPluginConfig;
+import org.apache.drill.common.logical.StoragePluginConfig;
 import org.apache.drill.exec.ExecConstants;
 import org.apache.drill.exec.ops.OptimizerRulesContext;
 import org.apache.drill.exec.physical.base.AbstractGroupScan;
@@ -52,9 +52,8 @@ import org.apache.drill.exec.store.SchemaConfig;
 import org.apache.drill.exec.store.StoragePluginOptimizerRule;
 import org.apache.drill.exec.store.dfs.FormatPlugin;
 import org.apache.drill.exec.store.hive.schema.HiveSchemaFactory;
+import org.apache.drill.shaded.guava.com.google.common.collect.ImmutableSet;
 
-import com.fasterxml.jackson.core.type.TypeReference;
-import com.fasterxml.jackson.databind.ObjectMapper;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
@@ -100,11 +99,6 @@ public class HiveStoragePlugin extends AbstractStoragePlugin {
   public HiveScan getPhysicalScan(String userName, JSONOptions selection, List<SchemaPath> columns, SessionOptionManager options) throws IOException {
     HiveReadEntry hiveReadEntry = selection.getListWith(new ObjectMapper(), new TypeReference<HiveReadEntry>(){});
     try {
-      if (hiveReadEntry.getJdbcTableType() == TableType.VIEW) {
-        throw new UnsupportedOperationException(
-            "Querying views created in Hive from Drill is not supported in current version.");
-      }
-
       Map<String, String> confProperties = new HashMap<>();
       if (options != null) {
         String value = StringEscapeUtils.unescapeJava(options.getString(ExecConstants.HIVE_CONF_PROPERTIES));
diff --git a/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/store/hive/schema/DrillHiveTable.java b/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/store/hive/schema/DrillHiveTable.java
index 8bf4162..5c52435 100644
--- a/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/store/hive/schema/DrillHiveTable.java
+++ b/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/store/hive/schema/DrillHiveTable.java
@@ -17,31 +17,21 @@
  */
 package org.apache.drill.exec.store.hive.schema;
 
-import org.apache.calcite.util.Util;
 import java.util.ArrayList;
+import java.util.Collection;
 import java.util.List;
+import java.util.stream.Collectors;
+import java.util.stream.Stream;
 
+import org.apache.calcite.rel.type.RelDataType;
+import org.apache.calcite.rel.type.RelDataTypeFactory;
 import org.apache.drill.exec.planner.logical.DrillTable;
+import org.apache.drill.exec.planner.types.HiveToRelDataTypeConverter;
 import org.apache.drill.exec.store.hive.HiveReadEntry;
 import org.apache.drill.exec.store.hive.HiveStoragePlugin;
 import org.apache.drill.exec.store.hive.HiveTableWithColumnCache;
-import org.apache.hadoop.hive.metastore.api.FieldSchema;
-import org.apache.hadoop.hive.serde2.typeinfo.DecimalTypeInfo;
-import org.apache.hadoop.hive.serde2.typeinfo.ListTypeInfo;
-import org.apache.hadoop.hive.serde2.typeinfo.MapTypeInfo;
-import org.apache.hadoop.hive.serde2.typeinfo.PrimitiveTypeInfo;
-import org.apache.hadoop.hive.serde2.typeinfo.StructTypeInfo;
-import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
-import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils;
-import org.apache.calcite.rel.type.RelDataType;
-import org.apache.calcite.rel.type.RelDataTypeFactory;
-import org.apache.calcite.sql.SqlCollation;
-import org.apache.calcite.sql.type.SqlTypeName;
-
-import org.apache.drill.shaded.guava.com.google.common.collect.Lists;
 
-public class DrillHiveTable extends DrillTable{
-  static final org.slf4j.Logger logger = org.slf4j.LoggerFactory.getLogger(DrillHiveTable.class);
+public class DrillHiveTable extends DrillTable {
 
   protected final HiveTableWithColumnCache hiveTable;
 
@@ -52,140 +42,14 @@ public class DrillHiveTable extends DrillTable{
 
   @Override
   public RelDataType getRowType(RelDataTypeFactory typeFactory) {
-    List<RelDataType> typeList = Lists.newArrayList();
-    List<String> fieldNameList = Lists.newArrayList();
-
-    List<FieldSchema> hiveFields = hiveTable.getColumnListsCache().getColumns(0);
-    for(FieldSchema hiveField : hiveFields) {
-      fieldNameList.add(hiveField.getName());
-      typeList.add(getNullableRelDataTypeFromHiveType(
-          typeFactory, TypeInfoUtils.getTypeInfoFromTypeString(hiveField.getType())));
-    }
-
-    for (FieldSchema field : hiveTable.getPartitionKeys()) {
-      fieldNameList.add(field.getName());
-      typeList.add(getNullableRelDataTypeFromHiveType(
-          typeFactory, TypeInfoUtils.getTypeInfoFromTypeString(field.getType())));
-    }
-
-    return typeFactory.createStructType(typeList, fieldNameList);
-  }
-
-  private RelDataType getNullableRelDataTypeFromHiveType(RelDataTypeFactory typeFactory, TypeInfo typeInfo) {
-    RelDataType relDataType = getRelDataTypeFromHiveType(typeFactory, typeInfo);
-    return typeFactory.createTypeWithNullability(relDataType, true);
-  }
-
-  private RelDataType getRelDataTypeFromHivePrimitiveType(RelDataTypeFactory typeFactory, PrimitiveTypeInfo pTypeInfo) {
-    switch(pTypeInfo.getPrimitiveCategory()) {
-      case BOOLEAN:
-        return typeFactory.createSqlType(SqlTypeName.BOOLEAN);
-
-      case BYTE:
-      case SHORT:
-        return typeFactory.createSqlType(SqlTypeName.INTEGER);
-
-      case INT:
-        return typeFactory.createSqlType(SqlTypeName.INTEGER);
-
-      case LONG:
-        return typeFactory.createSqlType(SqlTypeName.BIGINT);
-
-      case FLOAT:
-        return typeFactory.createSqlType(SqlTypeName.FLOAT);
-
-      case DOUBLE:
-        return typeFactory.createSqlType(SqlTypeName.DOUBLE);
-
-      case DATE:
-        return typeFactory.createSqlType(SqlTypeName.DATE);
-
-      case TIMESTAMP:
-        return typeFactory.createSqlType(SqlTypeName.TIMESTAMP);
-
-      case BINARY:
-        return typeFactory.createSqlType(SqlTypeName.VARBINARY);
-
-      case DECIMAL: {
-        DecimalTypeInfo decimalTypeInfo = (DecimalTypeInfo)pTypeInfo;
-        return typeFactory.createSqlType(SqlTypeName.DECIMAL, decimalTypeInfo.precision(), decimalTypeInfo.scale());
-      }
-
-      case STRING:
-      case VARCHAR: {
-        int maxLen = TypeInfoUtils.getCharacterLengthForType(pTypeInfo);
-        return typeFactory.createTypeWithCharsetAndCollation(
-          typeFactory.createSqlType(SqlTypeName.VARCHAR, maxLen), /*input type*/
-          Util.getDefaultCharset(),
-          SqlCollation.IMPLICIT /* TODO: need to decide if implicit is the correct one */
-        );
-      }
-
-      case CHAR: {
-        int maxLen = TypeInfoUtils.getCharacterLengthForType(pTypeInfo);
-        return typeFactory.createTypeWithCharsetAndCollation(
-          typeFactory.createSqlType(SqlTypeName.CHAR, maxLen), /*input type*/
-          Util.getDefaultCharset(),
-          SqlCollation.IMPLICIT
-        );
-      }
-
-      case UNKNOWN:
-      case VOID:
-      default:
-        throwUnsupportedHiveDataTypeError(pTypeInfo.getPrimitiveCategory().toString());
-    }
-
-    return null;
+    HiveToRelDataTypeConverter dataTypeConverter = new HiveToRelDataTypeConverter(typeFactory);
+    final List<String> fieldNames = new ArrayList<>();
+    final List<RelDataType> fieldTypes = Stream.of(hiveTable.getColumnListsCache().getTableSchemaColumns(), hiveTable.getPartitionKeys())
+            .flatMap(Collection::stream)
+            .peek(hiveField -> fieldNames.add(hiveField.getName()))
+            .map(dataTypeConverter::convertToNullableRelDataType)
+            .collect(Collectors.toList());
+    return typeFactory.createStructType(fieldTypes, fieldNames);
   }
 
-  private RelDataType getRelDataTypeFromHiveType(RelDataTypeFactory typeFactory, TypeInfo typeInfo) {
-    switch(typeInfo.getCategory()) {
-      case PRIMITIVE:
-        return getRelDataTypeFromHivePrimitiveType(typeFactory, ((PrimitiveTypeInfo) typeInfo));
-
-      case LIST: {
-        ListTypeInfo listTypeInfo = (ListTypeInfo)typeInfo;
-        RelDataType listElemTypeInfo = getRelDataTypeFromHiveType(typeFactory, listTypeInfo.getListElementTypeInfo());
-        return typeFactory.createArrayType(listElemTypeInfo, -1);
-      }
-
-      case MAP: {
-        MapTypeInfo mapTypeInfo = (MapTypeInfo)typeInfo;
-        RelDataType keyType = getRelDataTypeFromHiveType(typeFactory, mapTypeInfo.getMapKeyTypeInfo());
-        RelDataType valueType = getRelDataTypeFromHiveType(typeFactory, mapTypeInfo.getMapValueTypeInfo());
-        return typeFactory.createMapType(keyType, valueType);
-      }
-
-      case STRUCT: {
-        StructTypeInfo structTypeInfo = (StructTypeInfo)typeInfo;
-        ArrayList<String> fieldNames = structTypeInfo.getAllStructFieldNames();
-        ArrayList<TypeInfo> fieldHiveTypeInfoList = structTypeInfo.getAllStructFieldTypeInfos();
-        List<RelDataType> fieldRelDataTypeList = Lists.newArrayList();
-        for(TypeInfo fieldHiveType : fieldHiveTypeInfoList) {
-          fieldRelDataTypeList.add(getRelDataTypeFromHiveType(typeFactory, fieldHiveType));
-        }
-        return typeFactory.createStructType(fieldRelDataTypeList, fieldNames);
-      }
-
-      case UNION:
-        logger.warn("There is no UNION data type in SQL. Converting it to Sql type OTHER to avoid " +
-            "breaking INFORMATION_SCHEMA queries");
-        return typeFactory.createSqlType(SqlTypeName.OTHER);
-    }
-
-    throwUnsupportedHiveDataTypeError(typeInfo.getCategory().toString());
-    return null;
-  }
-
-  private void throwUnsupportedHiveDataTypeError(String hiveType) {
-    StringBuilder errMsg = new StringBuilder();
-    errMsg.append(String.format("Unsupported Hive data type %s. ", hiveType));
-    errMsg.append(System.getProperty("line.separator"));
-    errMsg.append("Following Hive data types are supported in Drill INFORMATION_SCHEMA: ");
-    errMsg.append("BOOLEAN, BYTE, SHORT, INT, LONG, FLOAT, DOUBLE, DATE, TIMESTAMP, BINARY, DECIMAL, STRING, " +
-        "VARCHAR, CHAR, LIST, MAP, STRUCT and UNION");
-
-    throw new RuntimeException(errMsg.toString());
-  }
 }
diff --git a/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/store/hive/schema/DrillHiveViewTable.java b/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/store/hive/schema/DrillHiveViewTable.java
index aedc5f2..5a9e92d 100644
--- a/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/store/hive/schema/DrillHiveViewTable.java
+++ b/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/store/hive/schema/DrillHiveViewTable.java
@@ -17,26 +17,91 @@
  */
 package org.apache.drill.exec.store.hive.schema;
 
-import org.apache.calcite.schema.Schema.TableType;
+import java.util.Collection;
+import java.util.List;
+import java.util.stream.Stream;
 
-import org.apache.drill.exec.planner.logical.DrillViewInfoProvider;
+import org.apache.calcite.plan.RelOptTable;
+import org.apache.calcite.rel.RelNode;
+import org.apache.calcite.rel.type.RelDataType;
+import org.apache.calcite.schema.SchemaPlus;
+import org.apache.calcite.sql.type.SqlTypeFactoryImpl;
+import org.apache.drill.exec.dotdrill.View;
+import org.apache.drill.exec.planner.logical.DrillViewTable;
+import org.apache.drill.exec.planner.sql.SchemaUtilites;
+import org.apache.drill.exec.planner.types.DrillRelDataTypeSystem;
+import org.apache.drill.exec.planner.types.HiveToRelDataTypeConverter;
+import org.apache.drill.exec.store.SchemaConfig;
 import org.apache.drill.exec.store.hive.HiveReadEntry;
-import org.apache.drill.exec.store.hive.HiveStoragePlugin;
+import org.apache.drill.exec.store.hive.HiveTableWithColumnCache;
+import org.apache.drill.shaded.guava.com.google.common.collect.ImmutableList;
 
-public class DrillHiveViewTable extends DrillHiveTable implements DrillViewInfoProvider {
+import static java.util.stream.Collectors.toList;
 
-  public DrillHiveViewTable(String storageEngineName, HiveStoragePlugin plugin, String userName,
-      HiveReadEntry readEntry) {
-    super(storageEngineName, plugin, userName, readEntry);
+/**
+ * DrillViewTable which may be created from Hive view metadata and will work
+ * similar to views defined in Drill.
+ */
+public class DrillHiveViewTable extends DrillViewTable {
+
+  private static final HiveToRelDataTypeConverter DATA_TYPE_CONVERTER = new HiveToRelDataTypeConverter(
+      new SqlTypeFactoryImpl(DrillRelDataTypeSystem.DRILL_REL_DATATYPE_SYSTEM));
+
+  public DrillHiveViewTable(HiveReadEntry entry, List<String> schemaPath,
+                            SchemaConfig schemaConfig,
+                            String user) {
+    super(createView(schemaPath, entry.getTable()), user, schemaConfig.getViewExpansionContext());
   }
 
+  /**
+   * Because the tables referenced by Hive views are defined
+   * without a storage plugin name, we make sure that the storage
+   * plugin name is taken into account for the special case when
+   * Hive storage-based authorization is used and the user is
+   * allowed to query the view but does not have the rights to
+   * access the underlying table.
+   *
+   * @param context - to rel conversion context
+   * @param rowType - data type of requested columns
+   * @param workspaceSchemaPath - path to view in drill, for example: ["hive"]
+   * @param tokenSchemaTree - schema created for impersonated user
+   * @return - relational representation of expanded Hive view
+   */
   @Override
-  public TableType getJdbcTableType() {
-    return TableType.VIEW;
+  protected RelNode expandViewForImpersonatedUser(RelOptTable.ToRelContext context, RelDataType rowType,
+                                                  List<String> workspaceSchemaPath, SchemaPlus tokenSchemaTree) {
+    SchemaPlus drillHiveSchema = SchemaUtilites.findSchema(tokenSchemaTree, workspaceSchemaPath);
+    workspaceSchemaPath = ImmutableList.of();
+    return super.expandViewForImpersonatedUser(context, rowType, workspaceSchemaPath, drillHiveSchema);
   }
 
-  @Override
-  public String getViewSql() {
-    return hiveTable.getViewExpandedText();
+  /**
+   * Responsible for creation of a View based on Hive view metadata.
+   * Usually such instances are created as a result of reading .view.drill files.
+   *
+   * @param schemaPath - path to view in drill, for example: ["hive"]
+   * @param hiveView - hive view metadata
+   * @return - View object for further usage
+   */
+  private static View createView(List<String> schemaPath, HiveTableWithColumnCache hiveView) {
+    List<View.FieldType> viewFields = getViewFieldTypes(hiveView);
+    String viewName = hiveView.getTableName();
+    String viewSql = hiveView.getViewExpandedText();
+    return new View(viewName, viewSql, viewFields, schemaPath);
   }
+
+  /**
+   * Helper method for converting Hive view fields
+   * to Drill view fields.
+   *
+   * @param hiveTable - hive view metadata
+   * @return - list of fields for construction of View
+   */
+  private static List<View.FieldType> getViewFieldTypes(HiveTableWithColumnCache hiveTable) {
+    return Stream.of(hiveTable.getColumnListsCache().getTableSchemaColumns(), hiveTable.getPartitionKeys())
+        .flatMap(Collection::stream)
+        .map(hiveField -> new View.FieldType(hiveField.getName(), DATA_TYPE_CONVERTER.convertToNullableRelDataType(hiveField)))
+        .collect(toList());
+  }
+
 }
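
Illustrative only: a compact sketch of how a Hive view's expanded SQL and column types end up in a Drill View object, following the createView()/getViewFieldTypes() pattern above; the column and SQL text are made up:

import java.util.Collections;
import java.util.List;

import org.apache.calcite.sql.type.SqlTypeFactoryImpl;
import org.apache.drill.exec.dotdrill.View;
import org.apache.drill.exec.planner.types.DrillRelDataTypeSystem;
import org.apache.drill.exec.planner.types.HiveToRelDataTypeConverter;
import org.apache.hadoop.hive.metastore.api.FieldSchema;

public class HiveViewToDrillViewExample {
  public static void main(String[] args) {
    HiveToRelDataTypeConverter converter = new HiveToRelDataTypeConverter(
        new SqlTypeFactoryImpl(DrillRelDataTypeSystem.DRILL_REL_DATATYPE_SYSTEM));

    // Pretend this column description came from the Hive metastore for a view.
    FieldSchema keyColumn = new FieldSchema("key", "int", null);
    List<View.FieldType> fields = Collections.singletonList(
        new View.FieldType(keyColumn.getName(), converter.convertToNullableRelDataType(keyColumn)));

    // The expanded view text is what Drill re-parses and plans, scoped to the "hive" schema.
    View drillView = new View("hive_view", "SELECT `key` FROM `default`.`kv`",
        fields, Collections.singletonList("hive"));
    System.out.println(drillView);
  }
}
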
diff --git a/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/store/hive/schema/HiveSchemaFactory.java b/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/store/hive/schema/HiveSchemaFactory.java
index abb8676..402dda2 100644
--- a/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/store/hive/schema/HiveSchemaFactory.java
+++ b/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/store/hive/schema/HiveSchemaFactory.java
@@ -25,15 +25,12 @@ import java.util.Set;
 import java.util.concurrent.ExecutionException;
 import java.util.concurrent.TimeUnit;
 
-import org.apache.drill.shaded.guava.com.google.common.cache.CacheBuilder;
-import org.apache.drill.shaded.guava.com.google.common.cache.CacheLoader;
-import org.apache.drill.shaded.guava.com.google.common.cache.LoadingCache;
-import org.apache.drill.shaded.guava.com.google.common.cache.RemovalListener;
 import org.apache.calcite.schema.SchemaPlus;
+import org.apache.calcite.schema.Table;
+import org.apache.drill.common.AutoCloseables;
 import org.apache.drill.common.exceptions.DrillRuntimeException;
 import org.apache.drill.common.exceptions.ExecutionSetupException;
 import org.apache.drill.exec.ExecConstants;
-import org.apache.drill.exec.planner.logical.DrillTable;
 import org.apache.drill.exec.store.AbstractSchema;
 import org.apache.drill.exec.store.AbstractSchemaFactory;
 import org.apache.drill.exec.store.SchemaConfig;
@@ -41,12 +38,17 @@ import org.apache.drill.exec.store.hive.DrillHiveMetaStoreClient;
 import org.apache.drill.exec.store.hive.HiveReadEntry;
 import org.apache.drill.exec.store.hive.HiveStoragePlugin;
 import org.apache.drill.exec.store.hive.HiveStoragePluginConfig;
-import org.apache.drill.exec.util.ImpersonationUtil;
+import org.apache.drill.shaded.guava.com.google.common.cache.CacheBuilder;
+import org.apache.drill.shaded.guava.com.google.common.cache.CacheLoader;
+import org.apache.drill.shaded.guava.com.google.common.cache.LoadingCache;
+import org.apache.drill.shaded.guava.com.google.common.cache.RemovalListener;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
 import org.apache.hadoop.hive.metastore.api.MetaException;
 import org.apache.thrift.TException;
 
+import static org.apache.drill.exec.util.ImpersonationUtil.getProcessUserName;
+
 public class HiveSchemaFactory extends AbstractSchemaFactory {
   private static final org.slf4j.Logger logger = org.slf4j.LoggerFactory.getLogger(HiveSchemaFactory.class);
 
@@ -104,14 +106,11 @@ public class HiveSchemaFactory extends AbstractSchemaFactory {
    * Close this schema factory in preparation for retrying. Attempt to close
    * connections, but just ignore any errors.
    */
-
   public void close() {
-    try {
-      processUserMetastoreClient.close();
-    } catch (Exception e) { }
-    try {
-      metaStoreClientLoadingCache.invalidateAll();
-    } catch (Exception e) { }
+    AutoCloseables.closeSilently(
+        processUserMetastoreClient::close,
+        metaStoreClientLoadingCache::invalidateAll
+    );
   }
 
   @Override
@@ -160,7 +159,9 @@ public class HiveSchemaFactory extends AbstractSchemaFactory {
       }
     }
 
-    /** Help method to get subschema when we know it exists (already checks the existence) */
+    /**
+     * Helper method to get subschema when we know it exists (already checked the existence)
+     */
     private HiveDatabaseSchema getSubSchemaKnownExists(String name) {
       return new HiveDatabaseSchema(this, name, mClient, schemaConfig);
     }
@@ -208,20 +209,19 @@ public class HiveSchemaFactory extends AbstractSchemaFactory {
       return false;
     }
 
-    DrillTable getDrillTable(String dbName, String t) {
+    Table getDrillTable(String dbName, String t) {
       HiveReadEntry entry = getSelectionBaseOnName(dbName, t);
       if (entry == null) {
         return null;
       }
+      final String schemaUser = schemaConfig.getUserName();
+      return TableType.VIEW == entry.getJdbcTableType()
+          ? new DrillHiveViewTable(entry, schemaPath, schemaConfig, getUser(schemaUser, entry.getTable().getOwner()))
+          : new DrillHiveTable(getName(), plugin, getUser(schemaUser, getProcessUserName()), entry);
+    }
 
-      final String userToImpersonate = needToImpersonateReadingData() ? schemaConfig.getUserName() :
-          ImpersonationUtil.getProcessUserName();
-
-      if (entry.getJdbcTableType() == TableType.VIEW) {
-        return new DrillHiveViewTable(getName(), plugin, userToImpersonate, entry);
-      } else {
-        return new DrillHiveTable(getName(), plugin, userToImpersonate, entry);
-      }
+    private String getUser(String impersonated, String notImpersonated) {
+      return needToImpersonateReadingData() ? impersonated : notImpersonated;
     }
 
     HiveReadEntry getSelectionBaseOnName(String dbName, String t) {
diff --git a/contrib/storage-hive/core/src/test/java/org/apache/drill/exec/hive/TestHiveViewsSupport.java b/contrib/storage-hive/core/src/test/java/org/apache/drill/exec/hive/TestHiveViewsSupport.java
new file mode 100644
index 0000000..568a7b0
--- /dev/null
+++ b/contrib/storage-hive/core/src/test/java/org/apache/drill/exec/hive/TestHiveViewsSupport.java
@@ -0,0 +1,233 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.drill.exec.hive;
+
+import java.math.BigDecimal;
+
+import org.apache.drill.categories.HiveStorageTest;
+import org.apache.drill.categories.SlowTest;
+import org.apache.drill.exec.ExecConstants;
+import org.apache.drill.exec.expr.fn.impl.DateUtility;
+import org.apache.drill.exec.planner.physical.PlannerSettings;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+import static org.junit.Assert.assertEquals;
+
+@Category({SlowTest.class, HiveStorageTest.class})
+public class TestHiveViewsSupport extends HiveTestBase {
+
+  @Test
+  public void selectStarFromView() throws Exception {
+    testBuilder()
+        .sqlQuery("SELECT * FROM hive.hive_view")
+        .unOrdered()
+        .baselineColumns("key", "value")
+        .baselineValues(1, " key_1")
+        .baselineValues(2, " key_2")
+        .baselineValues(3, " key_3")
+        .baselineValues(4, " key_4")
+        .baselineValues(5, " key_5")
+        .go();
+  }
+
+  @Test
+  public void useHiveAndSelectStarFromView() throws Exception {
+    test("USE hive");
+    testBuilder()
+        .sqlQuery("SELECT * FROM hive_view")
+        .unOrdered()
+        .baselineColumns("key", "value")
+        .baselineValues(1, " key_1")
+        .baselineValues(2, " key_2")
+        .baselineValues(3, " key_3")
+        .baselineValues(4, " key_4")
+        .baselineValues(5, " key_5")
+        .go();
+  }
+
+  @Test
+  public void joinViewAndTable() throws Exception {
+    testBuilder()
+        .sqlQuery("SELECT v.key AS key, t.`value` AS val " +
+            "FROM hive.kv t " +
+            "INNER JOIN hive.hive_view v " +
+            "ON v.key = t.key AND t.key=1")
+        .unOrdered()
+        .baselineColumns("key", "val")
+        .baselineValues(1, " key_1")
+        .go();
+  }
+
+  @Test
+  public void nativeParquetScanForView() throws Exception {
+    try {
+      setSessionOption(ExecConstants.HIVE_OPTIMIZE_PARQUET_SCAN_WITH_NATIVE_READER, true);
+      setSessionOption(PlannerSettings.ENABLE_DECIMAL_DATA_TYPE_KEY, true);
+
+      String query = "select * from hive.kv_native_view where key > 1";
+
+      int actualRowCount = testSql(query);
+      assertEquals("Expected and actual row count should match", 2, actualRowCount);
+
+      testPlanMatchingPatterns(query,
+          new String[]{"HiveDrillNativeParquetScan", "numFiles=1"}, null);
+
+    } finally {
+      resetSessionOption(ExecConstants.HIVE_OPTIMIZE_PARQUET_SCAN_WITH_NATIVE_READER);
+      resetSessionOption(PlannerSettings.ENABLE_DECIMAL_DATA_TYPE_KEY);
+    }
+
+  }
+
+  @Test
+  public void viewWithAllSupportedDataTypes() throws Exception {
+    testBuilder().sqlQuery("SELECT * FROM hive.readtest_view")
+        .unOrdered()
+        .baselineColumns(
+            "binary_field",
+            "boolean_field",
+            "tinyint_field",
+            "decimal0_field",
+            "decimal9_field",
+            "decimal18_field",
+            "decimal28_field",
+            "decimal38_field",
+            "double_field",
+            "float_field",
+            "int_field",
+            "bigint_field",
+            "smallint_field",
+            "string_field",
+            "varchar_field",
+            "timestamp_field",
+            "date_field",
+            "char_field",
+            // There is a regression in Hive 1.2.1 in binary type partition columns. Disable for now.
+            //"binary_part",
+            "boolean_part",
+            "tinyint_part",
+            "decimal0_part",
+            "decimal9_part",
+            "decimal18_part",
+            "decimal28_part",
+            "decimal38_part",
+            "double_part",
+            "float_part",
+            "int_part",
+            "bigint_part",
+            "smallint_part",
+            "string_part",
+            "varchar_part",
+            "timestamp_part",
+            "date_part",
+            "char_part")
+        .baselineValues(
+            "binaryfield".getBytes(),
+            false,
+            34,
+            new BigDecimal("66"),
+            new BigDecimal("2347.92"),
+            new BigDecimal("2758725827.99990"),
+            new BigDecimal("29375892739852.8"),
+            new BigDecimal("89853749534593985.783"),
+            8.345d,
+            4.67f,
+            123456,
+            234235L,
+            3455,
+            "stringfield",
+            "varcharfield",
+            DateUtility.parseBest("2013-07-05 17:01:00"),
+            DateUtility.parseLocalDate("2013-07-05"),
+            "charfield",
+            // There is a regression in Hive 1.2.1 in binary type partition columns. Disable for now.
+            //"binary",
+            true,
+            64,
+            new BigDecimal("37"),
+            new BigDecimal("36.90"),
+            new BigDecimal("3289379872.94565"),
+            new BigDecimal("39579334534534.4"),
+            new BigDecimal("363945093845093890.900"),
+            8.345d,
+            4.67f,
+            123456,
+            234235L,
+            3455,
+            "string",
+            "varchar",
+            DateUtility.parseBest("2013-07-05 17:01:00"),
+            DateUtility.parseLocalDate("2013-07-05"),
+            "char")
+        .baselineValues( // All fields are null, but partition fields have non-null values
+            null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null,
+            // There is a regression in Hive 1.2.1 in binary type partition columns. Disable for now.
+            //"binary",
+            true,
+            64,
+            new BigDecimal("37"),
+            new BigDecimal("36.90"),
+            new BigDecimal("3289379872.94565"),
+            new BigDecimal("39579334534534.4"),
+            new BigDecimal("363945093845093890.900"),
+            8.345d,
+            4.67f,
+            123456,
+            234235L,
+            3455,
+            "string",
+            "varchar",
+            DateUtility.parseBest("2013-07-05 17:01:00"),
+            DateUtility.parseLocalDate("2013-07-05"),
+            "char")
+        .go();
+  }
+
+  @Test
+  public void viewOverView() throws Exception {
+    testBuilder()
+        .sqlQuery("SELECT * FROM hive.view_over_hive_view")
+        .unOrdered()
+        .baselineColumns("key", "value")
+        .baselineValues(2, " key_2")
+        .baselineValues(3, " key_3")
+        .go();
+  }
+
+  @Test
+  public void materializedViews() throws Exception {
+    testBuilder()
+        .sqlQuery("SELECT * FROM hive.hive_view_m")
+        .unOrdered()
+        .baselineColumns("key", "value")
+        .baselineValues(1, " key_1")
+        .go();
+  }
+
+  @Test
+  public void viewOverTablesInDifferentSchema() throws Exception {
+    testBuilder()
+        .sqlQuery("SELECT dk_key_count FROM hive.db1.two_table_view")
+        .unOrdered()
+        .baselineColumns("dk_key_count")
+        .baselineValues(5L)
+        .go();
+  }
+
+}
diff --git a/contrib/storage-hive/core/src/test/java/org/apache/drill/exec/hive/TestInfoSchemaOnHiveStorage.java b/contrib/storage-hive/core/src/test/java/org/apache/drill/exec/hive/TestInfoSchemaOnHiveStorage.java
index f8b467f..ccd8918 100644
--- a/contrib/storage-hive/core/src/test/java/org/apache/drill/exec/hive/TestInfoSchemaOnHiveStorage.java
+++ b/contrib/storage-hive/core/src/test/java/org/apache/drill/exec/hive/TestInfoSchemaOnHiveStorage.java
@@ -42,7 +42,7 @@ public class TestInfoSchemaOnHiveStorage extends HiveTestBase {
         .baselineValues("hive.default", "readtest_parquet")
         .baselineValues("hive.default", "empty_table")
         .baselineValues("hive.default", "infoschematest")
-        .baselineValues("hive.default", "hiveview")
+        .baselineValues("hive.default", "hive_view")
         .baselineValues("hive.default", "kv")
         .baselineValues("hive.default", "kv_parquet")
         .baselineValues("hive.default", "kv_sh")
@@ -51,6 +51,10 @@ public class TestInfoSchemaOnHiveStorage extends HiveTestBase {
         .baselineValues("hive.default", "kv_native")
         .baselineValues("hive.default", "kv_native_ext")
         .baselineValues("hive.default", "sub_dir_table")
+        .baselineValues("hive.default", "readtest_view")
+        .baselineValues("hive.default", "kv_native_view")
+        .baselineValues("hive.default", "hive_view_m")
+        .baselineValues("hive.default", "view_over_hive_view")
         .go();
 
     testBuilder()
@@ -59,6 +63,7 @@ public class TestInfoSchemaOnHiveStorage extends HiveTestBase {
         .baselineColumns("TABLE_SCHEMA", "TABLE_NAME")
         .baselineValues("hive.db1", "kv_db1")
         .baselineValues("hive.db1", "avro")
+        .baselineValues("hive.db1", "two_table_view")
         .go();
 
     testBuilder()
@@ -242,12 +247,13 @@ public class TestInfoSchemaOnHiveStorage extends HiveTestBase {
         .baselineColumns("TABLE_CATALOG", "TABLE_SCHEMA", "TABLE_NAME", "TABLE_TYPE")
         .baselineValues("DRILL", "hive.db1", "kv_db1", "TABLE")
         .baselineValues("DRILL", "hive.db1", "avro", "TABLE")
+        .baselineValues("DRILL", "hive.db1", "two_table_view", "VIEW")
         .baselineValues("DRILL", "hive.default", "kv", "TABLE")
         .baselineValues("DRILL", "hive.default", "empty_table", "TABLE")
         .baselineValues("DRILL", "hive.default", "readtest", "TABLE")
         .baselineValues("DRILL", "hive.default", "infoschematest", "TABLE")
         .baselineValues("DRILL", "hive.default", "readtest_parquet", "TABLE")
-        .baselineValues("DRILL", "hive.default", "hiveview", "VIEW")
+        .baselineValues("DRILL", "hive.default", "hive_view", "VIEW")
         .baselineValues("DRILL", "hive.default", "partition_pruning_test", "TABLE")
         .baselineValues("DRILL", "hive.default", "partition_with_few_schemas", "TABLE")
         .baselineValues("DRILL", "hive.default", "kv_parquet", "TABLE")
@@ -256,6 +262,10 @@ public class TestInfoSchemaOnHiveStorage extends HiveTestBase {
         .baselineValues("DRILL", "hive.default", "kv_native", "TABLE")
         .baselineValues("DRILL", "hive.default", "kv_native_ext", "TABLE")
         .baselineValues("DRILL", "hive.default", "sub_dir_table", "TABLE")
+        .baselineValues("DRILL", "hive.default", "readtest_view", "VIEW")
+        .baselineValues("DRILL", "hive.default", "kv_native_view", "VIEW")
+        .baselineValues("DRILL", "hive.default", "hive_view_m", "TABLE")
+        .baselineValues("DRILL", "hive.default", "view_over_hive_view", "VIEW")
         .baselineValues("DRILL", "hive.skipper", "kv_text_small", "TABLE")
         .baselineValues("DRILL", "hive.skipper", "kv_text_large", "TABLE")
         .baselineValues("DRILL", "hive.skipper", "kv_incorrect_skip_header", "TABLE")
diff --git a/contrib/storage-hive/core/src/test/java/org/apache/drill/exec/impersonation/hive/BaseTestHiveImpersonation.java b/contrib/storage-hive/core/src/test/java/org/apache/drill/exec/impersonation/hive/BaseTestHiveImpersonation.java
index e361c66..53088ed 100644
--- a/contrib/storage-hive/core/src/test/java/org/apache/drill/exec/impersonation/hive/BaseTestHiveImpersonation.java
+++ b/contrib/storage-hive/core/src/test/java/org/apache/drill/exec/impersonation/hive/BaseTestHiveImpersonation.java
@@ -27,12 +27,15 @@ import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
 import org.apache.hadoop.hive.metastore.MetaStoreUtils;
+import org.apache.hadoop.hive.ql.Driver;
 import org.apache.hadoop.hive.shims.ShimLoader;
 
 import java.io.File;
+import java.io.IOException;
 import java.util.List;
 import java.util.Map;
 
+import static org.apache.drill.exec.hive.HiveTestUtilities.executeQuery;
 import static org.apache.drill.exec.store.hive.HiveTestDataGenerator.createFileWithPermissions;
 import static org.apache.hadoop.fs.FileSystem.FS_DEFAULT_NAME_KEY;
 import static org.apache.hadoop.hive.conf.HiveConf.ConfVars.METASTOREURIS;
@@ -49,10 +52,12 @@ public class BaseTestHiveImpersonation extends BaseTestImpersonation {
   protected static final String studentDef = "CREATE TABLE %s.%s" +
       "(rownum int, name string, age int, gpa float, studentnum bigint) " +
       "ROW FORMAT DELIMITED FIELDS TERMINATED BY ',' STORED AS TEXTFILE";
+
   protected static final String voterDef = "CREATE TABLE %s.%s" +
       "(voter_id int,name varchar(30), age tinyint, registration string, " +
       "contributions double,voterzone smallint,create_time timestamp) " +
       "ROW FORMAT DELIMITED FIELDS TERMINATED BY ',' STORED AS TEXTFILE";
+
   protected static final String partitionStudentDef = "CREATE TABLE %s.%s" +
       "(rownum INT, name STRING, gpa FLOAT, studentnum BIGINT) " +
       "partitioned by (age INT) ROW FORMAT DELIMITED FIELDS TERMINATED BY ',' STORED AS TEXTFILE";
@@ -132,8 +137,8 @@ public class BaseTestHiveImpersonation extends BaseTestImpersonation {
     testBuilder.go();
   }
 
-  protected void fromInfoSchemaHelper(final String pluginName, final String db, List<String> expectedTables, List<TableType> expectedTableTypes) throws Exception {
-    final String dbQualified = pluginName + "." + db;
+  protected void fromInfoSchemaHelper(final String db, List<String> expectedTables, List<TableType> expectedTableTypes) throws Exception {
+    final String dbQualified = hivePluginName + "." + db;
     final TestBuilder testBuilder = testBuilder()
         .sqlQuery("SELECT TABLE_SCHEMA, TABLE_NAME, TABLE_TYPE \n" +
             "FROM INFORMATION_SCHEMA.`TABLES` \n" +
@@ -157,4 +162,39 @@ public class BaseTestHiveImpersonation extends BaseTestImpersonation {
     // exit. As each metastore server instance is using its own resources and not sharing it with other metastore
     // server instances this should be ok.
   }
+
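+  /**
+   * Queries the given Drill view in the mini-DFS "tmp" workspace as the current session user
+   * and checks that the first row (rownum = 1) is returned.
+   */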
+  static void queryView(String viewName) throws Exception {
+    String query = String.format("SELECT rownum FROM %s.tmp.%s ORDER BY rownum LIMIT 1", MINI_DFS_STORAGE_PLUGIN_NAME, viewName);
+    testBuilder()
+        .sqlQuery(query)
+        .unOrdered()
+        .baselineColumns("rownum")
+        .baselineValues(1)
+        .go();
+  }
+
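+  /**
+   * Queries the given Drill view as the current session user and expects the
+   * "Not authorized to read view" error.
+   */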
+  static void queryViewNotAuthorized(String viewName) throws Exception {
+    String query = String.format("SELECT rownum FROM %s.tmp.%s ORDER BY rownum LIMIT 1", MINI_DFS_STORAGE_PLUGIN_NAME, viewName);
+    errorMsgTestHelper(query, String.format(
+        "Not authorized to read view [%s] in schema [%s.tmp]", viewName, MINI_DFS_STORAGE_PLUGIN_NAME));
+  }
+
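+  /**
+   * Creates a Hive table, loads the given data file into it and then applies the given
+   * owner, group and permissions to its warehouse directory.
+   */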
+  static void createTableWithStoragePermissions(final Driver hiveDriver, final String db, final String tbl, final String tblDef,
+                                                final String tblData, final String user, final String group, final short permissions) throws Exception {
+    createTable(hiveDriver, db, tbl, tblDef, tblData);
+    setStoragePermissions(db, tbl, user, group, permissions);
+  }
+
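+  /**
+   * Sets owner, group and permissions on the warehouse path of the given Hive table.
+   */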
+  static void setStoragePermissions(String db, String tbl, String user, String group, short permissions) throws IOException {
+    final Path p = getWhPathForHiveObject(db, tbl);
+    fs.setPermission(p, new FsPermission(permissions));
+    fs.setOwner(p, user, group);
+  }
+
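+  /**
+   * Creates a Hive table from the given definition template and loads the local data file into it.
+   */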
+  static void createTable(final Driver driver, final String db, final String tbl, final String tblDef,
+                          final String data) throws Exception {
+    executeQuery(driver, String.format(tblDef, db, tbl));
+    executeQuery(driver, String.format("LOAD DATA LOCAL INPATH '%s' INTO TABLE %s.%s", data, db, tbl));
+  }
+
 }
diff --git a/contrib/storage-hive/core/src/test/java/org/apache/drill/exec/impersonation/hive/TestSqlStdBasedAuthorization.java b/contrib/storage-hive/core/src/test/java/org/apache/drill/exec/impersonation/hive/TestSqlStdBasedAuthorization.java
index 08469fc..ca16677 100644
--- a/contrib/storage-hive/core/src/test/java/org/apache/drill/exec/impersonation/hive/TestSqlStdBasedAuthorization.java
+++ b/contrib/storage-hive/core/src/test/java/org/apache/drill/exec/impersonation/hive/TestSqlStdBasedAuthorization.java
@@ -30,6 +30,7 @@ import org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHive
 import org.apache.hadoop.hive.ql.session.SessionState;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
+import org.junit.Ignore;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
@@ -55,9 +56,16 @@ public class TestSqlStdBasedAuthorization extends BaseTestHiveImpersonation {
 
   // Tables in "db_general"
   private static final String g_student_user0 = "student_user0";
+
+  private static final String vw_student_user0 = "vw_student_user0";
+
   private static final String g_voter_role0 = "voter_role0";
+
+  private static final String vw_voter_role0 = "vw_voter_role0";
+
   private static final String g_student_user2 = "student_user2";
 
+  private static final String vw_student_user2 = "vw_student_user2";
 
   // Create a view on "g_student_user0". View is owned by user0:group0 and has permissions 750
   private static final String v_student_u0g0_750 = "v_student_u0g0_750";
@@ -65,12 +73,6 @@ public class TestSqlStdBasedAuthorization extends BaseTestHiveImpersonation {
   // Create a view on "v_student_u0g0_750". View is owned by user1:group1 and has permissions 750
   private static final String v_student_u1g1_750 = "v_student_u1g1_750";
 
-  private static final String query_v_student_u0g0_750 = String.format(
-      "SELECT rownum FROM %s.%s.%s ORDER BY rownum LIMIT 1", MINI_DFS_STORAGE_PLUGIN_NAME, "tmp", v_student_u0g0_750);
-
-  private static final String query_v_student_u1g1_750 = String.format(
-      "SELECT rownum FROM %s.%s.%s ORDER BY rownum LIMIT 1", MINI_DFS_STORAGE_PLUGIN_NAME, "tmp", v_student_u1g1_750);
-
   // Role for testing purpose
   private static final String test_role0 = "role0";
 
@@ -110,24 +112,59 @@ public class TestSqlStdBasedAuthorization extends BaseTestHiveImpersonation {
     return hiveConfig;
   }
 
+
+  /**
+   * Generates database objects with the following permissions:
+   * <p>
+   * |                                         | org1Users[0] | org1Users[1] | org1Users[2]
+   * ---------------------------------------------------------------------------------------
+   * db_general.g_student_user0                |      +       |      -       |      -       |
+   * db_general.g_voter_role0                  |      -       |      +       |      +       |
+   * db_general.g_student_user2                |      -       |      -       |      +       |
+   * |                                         |              |              |              |
+   * mini_dfs_plugin.tmp.v_student_u0g0_750    |      +       |      +       |      -       |
+   * mini_dfs_plugin.tmp.v_student_u1g1_750    |      -       |      +       |      +       |
+   * |                                         |              |              |              |
+   * db_general.vw_student_user0               |      +       |      -       |      -       |
+   * db_general.vw_voter_role0                 |      -       |      +       |      +       |
+   * db_general.vw_student_user2               |      -       |      -       |      +       |
+   * ---------------------------------------------------------------------------------------
+   *
+   * @throws Exception if test data generation fails
+   */
   private static void generateTestData() throws Exception {
     final SessionState ss = new SessionState(hiveConf);
     SessionState.start(ss);
     final Driver driver = new Driver(hiveConf);
 
     executeQuery(driver, "CREATE DATABASE " + db_general);
-    createTbl(driver, db_general, g_student_user0, studentDef, studentData);
-    createTbl(driver, db_general, g_voter_role0, voterDef, voterData);
-    createTbl(driver, db_general, g_student_user2, studentDef, studentData);
+    createTable(driver, db_general, g_student_user0, studentDef, studentData);
+    createTable(driver, db_general, g_voter_role0, voterDef, voterData);
+    createTable(driver, db_general, g_student_user2, studentDef, studentData);
+
+    createHiveView(driver, db_general, vw_student_user0, g_student_user0);
+    createHiveView(driver, db_general, vw_voter_role0, g_voter_role0);
+    createHiveView(driver, db_general, vw_student_user2, g_student_user2);
 
     executeQuery(driver, "SET ROLE admin");
     executeQuery(driver, "CREATE ROLE " + test_role0);
     executeQuery(driver, "GRANT ROLE " + test_role0 + " TO USER " + org1Users[1]);
     executeQuery(driver, "GRANT ROLE " + test_role0 + " TO USER " + org1Users[2]);
 
-    executeQuery(driver, String.format("GRANT SELECT ON %s.%s TO USER %s", db_general, g_student_user0, org1Users[0]));
-    executeQuery(driver, String.format("GRANT SELECT ON %s.%s TO ROLE %s", db_general, g_voter_role0, test_role0));
-    executeQuery(driver, String.format("GRANT SELECT ON %s.%s TO USER %s", db_general, g_student_user2, org1Users[2]));
+    executeQuery(driver, String.format("GRANT SELECT ON db_general.%s TO USER %s",
+        g_student_user0, org1Users[0]));
+    executeQuery(driver, String.format("GRANT SELECT ON db_general.%s TO USER %s",
+        vw_student_user0, org1Users[0]));
+
+    executeQuery(driver, String.format("GRANT SELECT ON db_general.%s TO ROLE %s",
+        g_voter_role0, test_role0));
+    executeQuery(driver, String.format("GRANT SELECT ON db_general.%s TO ROLE %s",
+        vw_voter_role0, test_role0));
+
+    executeQuery(driver, String.format("GRANT SELECT ON db_general.%s TO USER %s",
+        g_student_user2, org1Users[2]));
+    executeQuery(driver, String.format("GRANT SELECT ON db_general.%s TO USER %s",
+        vw_student_user2, org1Users[2]));
 
     createView(org1Users[0], org1Groups[0], v_student_u0g0_750,
         String.format("SELECT rownum, name, age, studentnum FROM %s.%s.%s",
@@ -137,14 +174,9 @@ public class TestSqlStdBasedAuthorization extends BaseTestHiveImpersonation {
         String.format("SELECT rownum, name, age FROM %s.%s.%s", MINI_DFS_STORAGE_PLUGIN_NAME, "tmp", v_student_u0g0_750));
   }
 
-  private static void createTbl(final Driver driver, final String db, final String tbl, final String tblDef,
-      final String data) throws Exception {
-    executeQuery(driver, String.format(tblDef, db, tbl));
-    executeQuery(driver, String.format("LOAD DATA LOCAL INPATH '%s' INTO TABLE %s.%s", data, db, tbl));
-  }
-
   // Irrespective of each db permissions, all dbs show up in "SHOW SCHEMAS"
   @Test
+  @Ignore // TODO: enable after DRILL-6923 is fixed
   public void showSchemas() throws Exception {
     testBuilder()
         .sqlQuery("SHOW SCHEMAS LIKE 'hive.%'")
@@ -156,31 +188,22 @@ public class TestSqlStdBasedAuthorization extends BaseTestHiveImpersonation {
   }
 
   @Test
-  public void showTables_user0() throws Exception {
+  public void user0_showTables() throws Exception {
     updateClient(org1Users[0]);
     showTablesHelper(db_general,
         // Users are expected to see all tables in a database even if they don't have permissions to read from tables.
         ImmutableList.of(
             g_student_user0,
             g_student_user2,
-            g_voter_role0
-        ));
-  }
-
-  @Test
-  public void showTables_user1() throws Exception {
-    updateClient(org1Users[1]);
-    showTablesHelper(db_general,
-        // Users are expected to see all tables in a database even if they don't have permissions to read from tables.
-        ImmutableList.of(
-            g_student_user0,
-            g_student_user2,
-            g_voter_role0
+            g_voter_role0,
+            vw_student_user0,
+            vw_voter_role0,
+            vw_student_user2
         ));
   }
 
   @Test
-  public void select_user0_1() throws Exception {
+  public void user0_allowed_g_student_user0() throws Exception {
     // SELECT on "student_user0" table is granted to user "user0"
     updateClient(org1Users[0]);
     test("USE " + hivePluginName + "." + db_general);
@@ -188,7 +211,12 @@ public class TestSqlStdBasedAuthorization extends BaseTestHiveImpersonation {
   }
 
   @Test
-  public void select_user0_2() throws Exception {
+  public void user0_allowed_vw_student_user0() throws Exception {
+    queryHiveView(org1Users[0], vw_student_user0);
+  }
+
+  @Test
+  public void user0_forbidden_g_voter_role0() throws Exception {
     // SELECT on table "student_user0" is NOT granted to user "user0" directly or indirectly through role "role0" as
     // user "user0" is not part of role "role0"
     updateClient(org1Users[0]);
@@ -199,7 +227,39 @@ public class TestSqlStdBasedAuthorization extends BaseTestHiveImpersonation {
   }
 
   @Test
-  public void select_user1_1() throws Exception {
+  public void user0_forbidden_vw_voter_role0() throws Exception {
+    queryHiveViewNotAuthorized(org1Users[0], vw_voter_role0);
+  }
+
+  @Test
+  public void user0_forbidden_v_student_u1g1_750() throws Exception {
+    updateClient(org1Users[0]);
+    queryViewNotAuthorized(v_student_u1g1_750);
+  }
+
+  @Test
+  public void user0_allowed_v_student_u0g0_750() throws Exception {
+    updateClient(org1Users[0]);
+    queryView(v_student_u0g0_750);
+  }
+
+  @Test
+  public void user1_showTables() throws Exception {
+    updateClient(org1Users[1]);
+    showTablesHelper(db_general,
+        // Users are expected to see all tables in a database even if they don't have permissions to read from tables.
+        ImmutableList.of(
+            g_student_user0,
+            g_student_user2,
+            g_voter_role0,
+            vw_student_user0,
+            vw_voter_role0,
+            vw_student_user2
+        ));
+  }
+
+  @Test
+  public void user1_forbidden_g_student_user0() throws Exception {
     // SELECT on table "student_user0" is NOT granted to user "user1"
     updateClient(org1Users[1]);
     test("USE " + hivePluginName + "." + db_general);
@@ -209,7 +269,12 @@ public class TestSqlStdBasedAuthorization extends BaseTestHiveImpersonation {
   }
 
   @Test
-  public void select_user1_2() throws Exception {
+  public void user1_forbidden_vw_student_user0() throws Exception {
+    queryHiveViewNotAuthorized(org1Users[1], vw_student_user0);
+  }
+
+  @Test
+  public void user1_allowed_g_voter_role0() throws Exception {
     // SELECT on "voter_role0" table is granted to role "role0" and user "user1" is part the role "role0"
     updateClient(org1Users[1]);
     test("USE " + hivePluginName + "." + db_general);
@@ -217,7 +282,12 @@ public class TestSqlStdBasedAuthorization extends BaseTestHiveImpersonation {
   }
 
   @Test
-  public void select_user1_3() throws Exception {
+  public void user1_allowed_vw_voter_role0() throws Exception {
+    queryHiveView(org1Users[1], vw_voter_role0);
+  }
+
+  @Test
+  public void user1_allowed_g_voter_role0_but_forbidden_g_student_user2() throws Exception {
     // SELECT on "voter_role0" table is granted to role "role0" and user "user1" is part the role "role0"
     // SELECT on "student_user2" table is NOT granted to either role "role0" or user "user1"
     updateClient(org1Users[1]);
@@ -229,72 +299,81 @@ public class TestSqlStdBasedAuthorization extends BaseTestHiveImpersonation {
   }
 
   @Test
-  public void select_user2_1() throws Exception {
-    // SELECT on "voter_role0" table is granted to role "role0" and user "user2" is part the role "role0"
-    updateClient(org1Users[2]);
+  public void user1_allowed_vw_voter_role0_but_forbidden_vw_student_user2() throws Exception {
+    // SELECT on "vw_voter_role0" table is granted to role "role0" and user "user1" is part the role "role0"
+    // SELECT on "vw_student_user2" table is NOT granted to either role "role0" or user "user1"
+    updateClient(org1Users[1]);
     test("USE " + hivePluginName + "." + db_general);
-    test(String.format("SELECT * FROM %s ORDER BY name LIMIT 2", g_voter_role0));
+    final String query =
+        String.format("SELECT * FROM %s v JOIN %s s on v.name = s.name limit 2;", vw_voter_role0, vw_student_user2);
+    errorMsgTestHelper(query, "Principal [name=user1_1, type=USER] does not have following privileges for " +
+        "operation QUERY [[SELECT] on Object [type=TABLE_OR_VIEW, name=db_general.vw_student_user2]]");
   }
 
   @Test
-  public void select_user2_2() throws Exception {
-    // SELECT on "student_user2" table is granted to user "user2"
-    updateClient(org1Users[2]);
-    test("USE " + hivePluginName + "." + db_general);
-    test(String.format("SELECT * FROM %s ORDER BY name LIMIT 2", g_student_user2));
+  public void user1_allowed_v_student_u0g0_750() throws Exception {
+    updateClient(org1Users[1]);
+    queryView(v_student_u0g0_750);
+  }
+
+  @Test
+  public void user1_allowed_v_student_u1g1_750() throws Exception {
+    updateClient(org1Users[1]);
+    queryView(v_student_u1g1_750);
   }
 
   @Test
-  public void select_user2_3() throws Exception {
+  public void user2_allowed_g_voter_role0() throws Exception {
     // SELECT on "voter_role0" table is granted to role "role0" and user "user2" is part the role "role0"
-    // SELECT on "student_user2" table is granted to user "user2"
     updateClient(org1Users[2]);
     test("USE " + hivePluginName + "." + db_general);
-    test(String.format("SELECT * FROM %s v JOIN %s s on v.name = s.name limit 2;", g_voter_role0, g_student_user2));
+    test(String.format("SELECT * FROM %s ORDER BY name LIMIT 2", g_voter_role0));
   }
 
-  private static void queryViewHelper(final String queryUser, final String query) throws Exception {
-    updateClient(queryUser);
-    testBuilder()
-        .sqlQuery(query)
-        .unOrdered()
-        .baselineColumns("rownum")
-        .baselineValues(1)
-        .go();
+  @Test
+  public void user2_allowed_vw_voter_role0() throws Exception {
+    queryHiveView(org1Users[2], vw_voter_role0);
   }
 
   @Test
-  public void selectUser0_v_student_u0g0_750() throws Exception {
-    queryViewHelper(org1Users[0], query_v_student_u0g0_750);
+  public void user2_allowed_g_student_user2() throws Exception {
+    // SELECT on "student_user2" table is granted to user "user2"
+    updateClient(org1Users[2]);
+    test("USE " + hivePluginName + "." + db_general);
+    test(String.format("SELECT * FROM %s ORDER BY name LIMIT 2", g_student_user2));
   }
 
   @Test
-  public void selectUser1_v_student_u0g0_750() throws Exception {
-    queryViewHelper(org1Users[1], query_v_student_u0g0_750);
+  public void user2_allowed_vw_student_user2() throws Exception {
+    queryHiveView(org1Users[2], vw_student_user2);
   }
 
   @Test
-  public void selectUser2_v_student_u0g0_750() throws Exception {
+  public void user2_allowed_g_voter_role0_and_g_student_user2() throws Exception {
+    // SELECT on "voter_role0" table is granted to role "role0" and user "user2" is part the role "role0"
+    // SELECT on "student_user2" table is granted to user "user2"
     updateClient(org1Users[2]);
-    errorMsgTestHelper(query_v_student_u0g0_750, String.format(
-        "Not authorized to read view [v_student_u0g0_750] in schema [%s.tmp]", MINI_DFS_STORAGE_PLUGIN_NAME));
+    test("USE " + hivePluginName + "." + db_general);
+    test(String.format("SELECT * FROM %s v JOIN %s s on v.name = s.name limit 2;", g_voter_role0, g_student_user2));
   }
 
   @Test
-  public void selectUser0_v_student_u1g1_750() throws Exception {
-    updateClient(org1Users[0]);
-    errorMsgTestHelper(query_v_student_u1g1_750, String.format(
-        "Not authorized to read view [v_student_u1g1_750] in schema [%s.tmp]", MINI_DFS_STORAGE_PLUGIN_NAME));
+  public void user2_allowed_vw_voter_role0_and_vw_student_user2() throws Exception {
+    updateClient(org1Users[2]);
+    test("USE " + hivePluginName + "." + db_general);
+    test(String.format("SELECT * FROM %s v JOIN %s s on v.name = s.name limit 2;", vw_voter_role0, vw_student_user2));
   }
 
   @Test
-  public void selectUser1_v_student_u1g1_750() throws Exception {
-    queryViewHelper(org1Users[1], query_v_student_u1g1_750);
+  public void user2_forbidden_v_student_u0g0_750() throws Exception {
+    updateClient(org1Users[2]);
+    queryViewNotAuthorized(v_student_u0g0_750);
   }
 
   @Test
-  public void selectUser2_v_student_u1g1_750() throws Exception {
-    queryViewHelper(org1Users[2], query_v_student_u1g1_750);
+  public void user2_allowed_v_student_u1g1_750() throws Exception {
+    updateClient(org1Users[2]);
+    queryView(v_student_u1g1_750);
   }
 
   @AfterClass
@@ -302,4 +381,33 @@ public class TestSqlStdBasedAuthorization extends BaseTestHiveImpersonation {
     stopMiniDfsCluster();
     stopHiveMetaStore();
   }
+
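+  /**
+   * Selects COUNT(*) from the given Hive view as the given user and expects exactly one row,
+   * since each test view is defined with LIMIT 1.
+   */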
+  private static void queryHiveView(String usr, String viewName) throws Exception {
+    String query = String.format("SELECT COUNT(*) AS rownum FROM %s.%s.%s",
+        hivePluginName, db_general, viewName);
+    updateClient(usr);
+    testBuilder()
+        .sqlQuery(query)
+        .unOrdered()
+        .baselineColumns("rownum")
+        .baselineValues(1L)
+        .go();
+  }
+
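+  /**
+   * Selects from the given Hive view as the given user and expects the SQL standard
+   * authorization error about the missing SELECT privilege.
+   */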
+  private static void queryHiveViewNotAuthorized(String usr, String viewName) throws Exception {
+    final String query = String.format("SELECT * FROM %s.%s.%s", hivePluginName, db_general, viewName);
+    final String expectedError = String.format("Principal [name=%s, type=USER] does not have following privileges for " +
+            "operation QUERY [[SELECT] on Object [type=TABLE_OR_VIEW, name=db_general.%s]]\n",
+        usr, viewName);
+
+    updateClient(usr);
+    errorMsgTestHelper(query, expectedError);
+  }
+
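+  /**
+   * Creates (or replaces) a single-row Hive view over the given table in the same database.
+   */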
+  private static void createHiveView(Driver driver, String db, String viewName, String tblName) {
+    String viewFullName = db + "." + viewName;
+    String tblFullName = db + "." + tblName;
+    executeQuery(driver, String.format("CREATE OR REPLACE VIEW %s AS SELECT * FROM %s LIMIT 1", viewFullName, tblFullName));
+  }
+
 }
diff --git a/contrib/storage-hive/core/src/test/java/org/apache/drill/exec/impersonation/hive/TestStorageBasedHiveAuthorization.java b/contrib/storage-hive/core/src/test/java/org/apache/drill/exec/impersonation/hive/TestStorageBasedHiveAuthorization.java
index f4389f8..684519a 100644
--- a/contrib/storage-hive/core/src/test/java/org/apache/drill/exec/impersonation/hive/TestStorageBasedHiveAuthorization.java
+++ b/contrib/storage-hive/core/src/test/java/org/apache/drill/exec/impersonation/hive/TestStorageBasedHiveAuthorization.java
@@ -17,13 +17,15 @@
  */
 package org.apache.drill.exec.impersonation.hive;
 
-import org.apache.drill.shaded.guava.com.google.common.collect.ImmutableList;
-import org.apache.drill.shaded.guava.com.google.common.collect.Maps;
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.Map;
 
 import org.apache.calcite.schema.Schema.TableType;
 import org.apache.drill.categories.HiveStorageTest;
-import org.apache.drill.exec.store.dfs.WorkspaceConfig;
 import org.apache.drill.categories.SlowTest;
+import org.apache.drill.shaded.guava.com.google.common.collect.ImmutableList;
+import org.apache.drill.shaded.guava.com.google.common.collect.Maps;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hive.ql.Driver;
@@ -36,11 +38,11 @@ import org.junit.BeforeClass;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
-import java.util.Collections;
-import java.util.Map;
-
+import static java.util.Collections.emptyList;
 import static org.apache.drill.exec.hive.HiveTestUtilities.executeQuery;
+import static org.apache.drill.shaded.guava.com.google.common.collect.Lists.newArrayList;
 import static org.apache.hadoop.fs.FileSystem.FS_DEFAULT_NAME_KEY;
+import static org.apache.hadoop.hive.conf.HiveConf.ConfVars.DYNAMICPARTITIONINGMODE;
 import static org.apache.hadoop.hive.conf.HiveConf.ConfVars.HIVE_CBO_ENABLED;
 import static org.apache.hadoop.hive.conf.HiveConf.ConfVars.HIVE_METASTORE_AUTHENTICATOR_MANAGER;
 import static org.apache.hadoop.hive.conf.HiveConf.ConfVars.HIVE_METASTORE_AUTHORIZATION_AUTH_READS;
@@ -51,7 +53,6 @@ import static org.apache.hadoop.hive.conf.HiveConf.ConfVars.METASTORE_AUTO_CREAT
 import static org.apache.hadoop.hive.conf.HiveConf.ConfVars.METASTORE_EXECUTE_SET_UGI;
 import static org.apache.hadoop.hive.conf.HiveConf.ConfVars.METASTORE_PRE_EVENT_LISTENERS;
 import static org.apache.hadoop.hive.conf.HiveConf.ConfVars.METASTORE_SCHEMA_VERIFICATION;
-import static org.apache.hadoop.hive.conf.HiveConf.ConfVars.DYNAMICPARTITIONINGMODE;
 
 @Category({SlowTest.class, HiveStorageTest.class})
 public class TestStorageBasedHiveAuthorization extends BaseTestHiveImpersonation {
@@ -61,12 +62,12 @@ public class TestStorageBasedHiveAuthorization extends BaseTestHiveImpersonation
 
   // Tables in "db_general"
   private static final String g_student_u0_700 = "student_u0_700";
+  private static final String g_vw_g_student_u0_700 = "vw_u0_700_student_u0_700";
   private static final String g_student_u0g0_750 = "student_u0g0_750";
   private static final String g_student_all_755 = "student_all_755";
   private static final String g_voter_u1_700 = "voter_u1_700";
   private static final String g_voter_u2g1_750 = "voter_u2g1_750";
   private static final String g_voter_all_755 = "voter_all_755";
-
   private static final String g_partitioned_student_u0_700 = "partitioned_student_u0_700";
 
   // DB whose warehouse directory has permissions 700 and owned by user0
@@ -75,6 +76,7 @@ public class TestStorageBasedHiveAuthorization extends BaseTestHiveImpersonation
   // Tables in "db_u0_only"
   private static final String u0_student_all_755 = "student_all_755";
   private static final String u0_voter_all_755 = "voter_all_755";
+  private static final String u0_vw_voter_all_755 = "vw_voter_all_755";
 
   // DB whose warehouse directory has permissions 750 and owned by user1 and group1
   private static final String db_u1g1_only = "db_u1g1_only";
@@ -91,25 +93,26 @@ public class TestStorageBasedHiveAuthorization extends BaseTestHiveImpersonation
   // Create a view on "v_student_u0g0_750". View is owned by user1:group1 and has permissions 750
   private static final String v_student_u1g1_750 = "v_student_u1g1_750";
 
-  private static final String query_v_student_u0g0_750 = String.format(
-      "SELECT rownum FROM %s.%s.%s ORDER BY rownum LIMIT 1", MINI_DFS_STORAGE_PLUGIN_NAME, "tmp", v_student_u0g0_750);
-
-  private static final String query_v_student_u1g1_750 = String.format(
-      "SELECT rownum FROM %s.%s.%s ORDER BY rownum LIMIT 1", MINI_DFS_STORAGE_PLUGIN_NAME, "tmp", v_student_u1g1_750);
-
   // Create a view on "partitioned_student_u0_700". View is owned by user0:group0 and has permissions 750
   private static final String v_partitioned_student_u0g0_750 = "v_partitioned_student_u0g0_750";
 
   // Create a view on "v_partitioned_student_u0g0_750". View is owned by user1:group1 and has permissions 750
   private static final String v_partitioned_student_u1g1_750 = "v_partitioned_student_u1g1_750";
 
-  private static final String query_v_partitioned_student_u0g0_750 = String.format(
-      "SELECT rownum FROM %s.%s.%s ORDER BY rownum LIMIT 1", MINI_DFS_STORAGE_PLUGIN_NAME, "tmp",
-      v_partitioned_student_u0g0_750);
+  // rwx  -   -
+  // 1. Only the owning user has read, write and execute rights
+  private static final short _700 = (short) 0700;
+
+  // rwx  r-x -
+  // 1. The owning user has read, write and execute rights
+  // 2. The owning group has read and execute rights
+  private static final short _750 = (short) 0750;
 
-  private static final String query_v_partitioned_student_u1g1_750 = String.format(
-      "SELECT rownum FROM %s.%s.%s ORDER BY rownum LIMIT 1", MINI_DFS_STORAGE_PLUGIN_NAME, "tmp",
-      v_partitioned_student_u1g1_750);
+  // rwx  r-x r-x
+  // 1. The owning user has read, write and execute rights
+  // 2. The owning group has read and execute rights
+  // 3. Others have read and execute rights
+  private static final short _755 = (short) 0755;
 
   @BeforeClass
   public static void setup() throws Exception {
@@ -119,7 +122,7 @@ public class TestStorageBasedHiveAuthorization extends BaseTestHiveImpersonation
     startHiveMetaStore();
     startDrillCluster(true);
     addHiveStoragePlugin(getHivePluginConfig());
-    addMiniDfsBasedStorage(Maps.<String, WorkspaceConfig>newHashMap());
+    addMiniDfsBasedStorage(new HashMap<>());
     generateTestData();
   }
 
@@ -145,6 +148,41 @@ public class TestStorageBasedHiveAuthorization extends BaseTestHiveImpersonation
     return hiveConfig;
   }
 
+  /**
+   * User       Groups
+   * <br/>
+   * user0  |   group0
+   * user1  |   group0, group1
+   * user2  |   group1, group2
+   *
+   * Generates database objects with the following permissions:
+   * <p>
+   * |                                         | org1Users[0] | org1Users[1] | org1Users[2]
+   * ---------------------------------------------------------------------------------------
+   * db_general                                |      +       |      +       |      +       |
+   * db_general.g_student_u0_700               |      +       |      -       |      -       |
+   * db_general.g_student_u0g0_750             |      +       |      +       |      -       |
+   * db_general.g_student_all_755              |      +       |      +       |      +       |
+   * db_general.g_voter_u1_700                 |      -       |      +       |      -       |
+   * db_general.g_voter_u2g1_750               |      -       |      +       |      +       |
+   * db_general.g_voter_all_755                |      +       |      +       |      +       |
+   * db_general.g_partitioned_student_u0_700   |      +       |      -       |      -       |
+   * db_general.g_vw_g_student_u0_700          |      +       |      -       |      -       |
+   * |                                         |              |              |              |
+   * db_u0_only                                |      +       |      -       |      -       |
+   * db_u0_only.u0_student_all_755             |      +       |      -       |      -       |
+   * db_u0_only.u0_voter_all_755               |      +       |      -       |      -       |
+   * db_u0_only.u0_vw_voter_all_755            |      +       |      -       |      -       |
+   * |                                         |              |              |              |
+   * db_u1g1_only                              |      -       |      +       |      +       |
+   * db_u1g1_only.u1g1_student_all_755         |      -       |      +       |      +       |
+   * db_u1g1_only.u1g1_student_u1_700          |      -       |      +       |      -       |
+   * db_u1g1_only.u1g1_voter_all_755           |      -       |      +       |      +       |
+   * db_u1g1_only.u1g1_voter_u1_700            |      -       |      +       |      -       |
+   * ---------------------------------------------------------------------------------------
+   *
+   * @throws Exception if test data generation fails
+   */
   private static void generateTestData() throws Exception {
 
     // Generate Hive test tables
@@ -153,46 +191,85 @@ public class TestStorageBasedHiveAuthorization extends BaseTestHiveImpersonation
     final Driver driver = new Driver(hiveConf);
 
     executeQuery(driver, "CREATE DATABASE " + db_general);
-
-    createTable(driver,
-        db_general, g_student_u0_700, studentDef, studentData, org1Users[0], org1Groups[0], (short) 0700);
-    createTable(driver,
-        db_general, g_student_u0g0_750, studentDef, studentData, org1Users[0], org1Groups[0], (short) 0750);
-    createTable(driver,
-        db_general, g_student_all_755, studentDef, studentData, org1Users[2], org1Groups[2], (short) 0755);
-    createTable(driver,
-        db_general, g_voter_u1_700, voterDef, voterData, org1Users[1], org1Groups[1], (short) 0700);
-    createTable(driver,
-        db_general, g_voter_u2g1_750, voterDef, voterData, org1Users[2], org1Groups[1], (short) 0750);
-    createTable(driver,
-        db_general, g_voter_all_755, voterDef, voterData, org1Users[1], org1Groups[1], (short) 0755);
+    createTableWithStoragePermissions(driver,
+        db_general, g_student_u0_700,
+        studentDef, studentData,
+        org1Users[0], org1Groups[0],
+        _700);
+    createHiveView(driver, db_general,
+        g_vw_g_student_u0_700, g_student_u0_700);
+
+    createTableWithStoragePermissions(driver,
+        db_general, g_student_u0g0_750,
+        studentDef, studentData,
+        org1Users[0], org1Groups[0],
+        _750);
+    createTableWithStoragePermissions(driver,
+        db_general, g_student_all_755,
+        studentDef, studentData,
+        org1Users[2], org1Groups[2],
+        _755);
+    createTableWithStoragePermissions(driver,
+        db_general, g_voter_u1_700,
+        voterDef, voterData,
+        org1Users[1], org1Groups[1],
+        _700);
+    createTableWithStoragePermissions(driver,
+        db_general, g_voter_u2g1_750,
+        voterDef, voterData,
+        org1Users[2], org1Groups[1],
+        _750);
+    createTableWithStoragePermissions(driver,
+        db_general, g_voter_all_755,
+        voterDef, voterData,
+        org1Users[1], org1Groups[1],
+        _755);
 
     createPartitionedTable(driver,
-        db_general, g_partitioned_student_u0_700, partitionStudentDef,
-        "INSERT OVERWRITE TABLE %s.%s PARTITION(age) SELECT rownum, name, age, gpa, studentnum FROM %s.%s",
-        g_student_all_755, org1Users[0], org1Groups[0], (short) 0700);
+        org1Users[0], org1Groups[0]
+    );
 
-    changeDBPermissions(db_general, (short) 0755, org1Users[0], org1Groups[0]);
+    changeDBPermissions(db_general, _755, org1Users[0], org1Groups[0]);
 
     executeQuery(driver, "CREATE DATABASE " + db_u1g1_only);
 
-    createTable(driver,
-        db_u1g1_only, u1g1_student_all_755, studentDef, studentData, org1Users[1], org1Groups[1], (short) 0755);
-    createTable(driver,
-        db_u1g1_only, u1g1_student_u1_700, studentDef, studentData, org1Users[1], org1Groups[1], (short) 0700);
-    createTable(driver,
-        db_u1g1_only, u1g1_voter_all_755, voterDef, voterData, org1Users[1], org1Groups[1], (short) 0755);
-    createTable(driver,
-        db_u1g1_only, u1g1_voter_u1_700, voterDef, voterData, org1Users[1], org1Groups[1], (short) 0700);
+    createTableWithStoragePermissions(driver,
+        db_u1g1_only, u1g1_student_all_755,
+        studentDef, studentData,
+        org1Users[1], org1Groups[1],
+        _755);
+    createTableWithStoragePermissions(driver,
+        db_u1g1_only, u1g1_student_u1_700,
+        studentDef, studentData,
+        org1Users[1], org1Groups[1],
+        _700);
+    createTableWithStoragePermissions(driver,
+        db_u1g1_only, u1g1_voter_all_755,
+        voterDef, voterData,
+        org1Users[1], org1Groups[1],
+        _755);
+    createTableWithStoragePermissions(driver,
+        db_u1g1_only, u1g1_voter_u1_700,
+        voterDef, voterData,
+        org1Users[1], org1Groups[1],
+        _700);
+
+    changeDBPermissions(db_u1g1_only, _750, org1Users[1], org1Groups[1]);
 
-    changeDBPermissions(db_u1g1_only, (short) 0750, org1Users[1], org1Groups[1]);
 
     executeQuery(driver, "CREATE DATABASE " + db_u0_only);
-
-    createTable(driver, db_u0_only, u0_student_all_755, studentDef, studentData, org1Users[0], org1Groups[0], (short) 0755);
-    createTable(driver, db_u0_only, u0_voter_all_755, voterDef, voterData, org1Users[0], org1Groups[0], (short) 0755);
-
-    changeDBPermissions(db_u0_only, (short) 0700, org1Users[0], org1Groups[0]);
+    createTableWithStoragePermissions(driver,
+        db_u0_only, u0_student_all_755,
+        studentDef, studentData,
+        org1Users[0], org1Groups[0],
+        _755);
+    createTableWithStoragePermissions(driver,
+        db_u0_only, u0_voter_all_755,
+        voterDef, voterData,
+        org1Users[0], org1Groups[0],
+        _755);
+    createHiveView(driver, db_u0_only, u0_vw_voter_all_755, u0_voter_all_755);
+    changeDBPermissions(db_u0_only, _700, org1Users[0], org1Groups[0]);
 
     createView(org1Users[0], org1Groups[0], v_student_u0g0_750,
         String.format("SELECT rownum, name, age, studentnum FROM %s.%s.%s",
@@ -209,33 +286,27 @@ public class TestStorageBasedHiveAuthorization extends BaseTestHiveImpersonation
         String.format("SELECT rownum, name, age FROM %s.%s.%s", MINI_DFS_STORAGE_PLUGIN_NAME, "tmp", v_partitioned_student_u0g0_750));
   }
 
-  private static void createPartitionedTable(final Driver hiveDriver, final String db, final String tbl,
-      final String tblDef, final String loadTblDef, final String loadTbl, final String user, final String group,
-      final short permissions) throws Exception {
-    executeQuery(hiveDriver, String.format(tblDef, db, tbl));
-    executeQuery(hiveDriver, String.format(loadTblDef, db, tbl, db, loadTbl));
-
-    final Path p = getWhPathForHiveObject(db, tbl);
-    fs.setPermission(p, new FsPermission(permissions));
-    fs.setOwner(p, user, group);
-  }
-
-  private static void createTable(final Driver hiveDriver, final String db, final String tbl, final String tblDef,
-      final String tblData, final String user, final String group, final short permissions) throws Exception {
-    executeQuery(hiveDriver, String.format(tblDef, db, tbl));
-    executeQuery(hiveDriver, String.format("LOAD DATA LOCAL INPATH '%s' INTO TABLE %s.%s", tblData, db, tbl));
-    final Path p = getWhPathForHiveObject(db, tbl);
-    fs.setPermission(p, new FsPermission(permissions));
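+  /**
+   * Creates the partitioned student table in db_general, populates it from g_student_all_755
+   * and restricts its warehouse directory to the given owner and group with 700 permissions.
+   */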
+  private static void createPartitionedTable(final Driver hiveDriver, final String user, final String group) throws Exception {
+    executeQuery(hiveDriver, String.format(partitionStudentDef, db_general, g_partitioned_student_u0_700));
+    executeQuery(hiveDriver, String.format("INSERT OVERWRITE TABLE %s.%s PARTITION(age) SELECT rownum, name, age, gpa, studentnum FROM %s.%s",
+        db_general, g_partitioned_student_u0_700, db_general, g_student_all_755));
+    final Path p = getWhPathForHiveObject(db_general, g_partitioned_student_u0_700);
+    fs.setPermission(p, new FsPermission(_700));
     fs.setOwner(p, user, group);
   }
 
-  private static void changeDBPermissions(final String db, final short perm, final String u, final String g)
-      throws Exception {
+  private static void changeDBPermissions(final String db, final short perm, final String u, final String g) throws Exception {
     Path p = getWhPathForHiveObject(db, null);
     fs.setPermission(p, new FsPermission(perm));
     fs.setOwner(p, u, g);
   }
 
+
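+  /**
+   * Creates (or replaces) a single-row Hive view over the given table in the same database.
+   */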
+  private static void createHiveView(Driver driver, String db, String viewName, String tableName) throws IOException {
+    executeQuery(driver, String.format("CREATE OR REPLACE VIEW %s.%s AS SELECT * FROM %s.%s LIMIT 1",
+        db, viewName, db, tableName));
+  }
+
   // Irrespective of each db permissions, all dbs show up in "SHOW SCHEMAS"
   @Test
   public void showSchemas() throws Exception {
@@ -251,123 +322,302 @@ public class TestStorageBasedHiveAuthorization extends BaseTestHiveImpersonation
   }
 
   /**
-   * "SHOW TABLE" output for a db, should only contain the tables that the user
-   * has access to read. If the user has no read access to the db, the list will be always empty even if the user has
-   * read access to the tables inside the db.
+   * "SHOW TABLES" output should only contain the tables that the user
+   * has access to read.
+   *
    * @throws Exception
    */
   @Test
-  public void showTablesUser0() throws Exception {
+  public void user0_db_general_showTables() throws Exception {
     updateClient(org1Users[0]);
+    showTablesHelper(db_general, ImmutableList.of(
+        g_student_u0_700,
+        g_student_u0g0_750,
+        g_student_all_755,
+        g_voter_all_755,
+        g_partitioned_student_u0_700,
+        g_vw_g_student_u0_700
+    ));
+  }
 
-    showTablesHelper(db_general,
-        ImmutableList.of(
-            g_student_u0_700,
-            g_student_u0g0_750,
-            g_student_all_755,
-            g_voter_all_755,
-            g_partitioned_student_u0_700
-        ));
-
-    showTablesHelper(db_u0_only,
-        ImmutableList.of(
-            u0_student_all_755,
-            u0_voter_all_755
-        ));
-
-    showTablesHelper(db_u1g1_only, Collections.<String>emptyList());
+  @Test
+  public void user0_db_u0_only_showTables() throws Exception {
+    updateClient(org1Users[0]);
+    showTablesHelper(db_u0_only, ImmutableList.of(
+        u0_student_all_755,
+        u0_voter_all_755,
+        u0_vw_voter_all_755
+    ));
   }
 
+  /**
+   * If the user has no read access to the db, the list will always be empty even if the user has
+   * read access to the tables inside the db.
+   */
   @Test
-  public void fromInfoSchemaUser0() throws Exception {
+  public void user0_db_u1g1_only_showTables() throws Exception {
     updateClient(org1Users[0]);
+    showTablesHelper(db_u1g1_only, emptyList());
+  }
 
-    fromInfoSchemaHelper(
-        hivePluginName,
-        db_general,
+  @Test
+  public void user0_db_general_infoSchema() throws Exception {
+    updateClient(org1Users[0]);
+    fromInfoSchemaHelper(db_general,
         ImmutableList.of(
             g_student_u0_700,
             g_student_u0g0_750,
             g_student_all_755,
             g_voter_all_755,
-            g_partitioned_student_u0_700
+            g_partitioned_student_u0_700,
+            g_vw_g_student_u0_700
         ),
         ImmutableList.of(
             TableType.TABLE,
             TableType.TABLE,
             TableType.TABLE,
             TableType.TABLE,
-            TableType.TABLE
+            TableType.TABLE,
+            TableType.VIEW
         ));
+  }
 
-    fromInfoSchemaHelper(
-        hivePluginName,
-        db_u0_only,
+  @Test
+  public void user0_db_u0_only_infoSchema() throws Exception {
+    updateClient(org1Users[0]);
+    fromInfoSchemaHelper(db_u0_only,
         ImmutableList.of(
             u0_student_all_755,
-            u0_voter_all_755
+            u0_voter_all_755,
+            u0_vw_voter_all_755
         ),
         ImmutableList.of(
             TableType.TABLE,
-            TableType.TABLE
+            TableType.TABLE,
+            TableType.VIEW
         ));
+  }
 
-    fromInfoSchemaHelper(
-        hivePluginName,
-        db_u1g1_only,
-        Collections.<String>emptyList(),
-        Collections.<TableType>emptyList());
+  @Test
+  public void user0_db_u1g1_only_infoSchema() throws Exception {
+    updateClient(org1Users[0]);
+    fromInfoSchemaHelper(db_u1g1_only, emptyList(), emptyList());
+  }
+
+  /**
+   * user0 is the 700 owner
+   */
+  @Test
+  public void user0_allowed_g_student_u0_700() throws Exception {
+    updateClient(org1Users[0]);
+    queryHiveTableOrView(db_general, g_student_u0_700);
   }
 
   @Test
-  public void showTablesUser1() throws Exception {
+  public void user0_allowed_g_vw_u0_700_over_g_student_u0_700() throws Exception {
+    updateClient(org1Users[0]);
+    queryHiveTableOrView(db_general, g_vw_g_student_u0_700);
+  }
+
+  @Test
+  public void user1_forbidden_g_vw_u0_700_over_g_student_u0_700() throws Exception {
     updateClient(org1Users[1]);
+    queryHiveViewFailed(db_general, g_vw_g_student_u0_700);
+  }
 
-    showTablesHelper(db_general,
-        ImmutableList.of(
-            g_student_u0g0_750,
-            g_student_all_755,
-            g_voter_u1_700,
-            g_voter_u2g1_750,
-            g_voter_all_755
-        ));
+  @Test
+  public void user2_forbidden_g_vw_u0_700_over_g_student_u0_700() throws Exception {
+    updateClient(org1Users[2]);
+    queryHiveViewFailed(db_general, g_vw_g_student_u0_700);
+  }
 
-    showTablesHelper(db_u1g1_only,
-        ImmutableList.of(
-            u1g1_student_all_755,
-            u1g1_student_u1_700,
-            u1g1_voter_all_755,
-            u1g1_voter_u1_700
-        ));
+  @Test
+  public void user0_allowed_u0_vw_voter_all_755() throws Exception {
+    updateClient(org1Users[0]);
+    queryHiveTableOrView(db_u0_only, u0_vw_voter_all_755);
+  }
+
+  @Test
+  public void user1_forbidden_u0_vw_voter_all_755() throws Exception {
+    updateClient(org1Users[1]);
+    queryHiveViewFailed(db_u0_only, u0_vw_voter_all_755);
+  }
+
+  @Test
+  public void user2_forbidden_u0_vw_voter_all_755() throws Exception {
+    updateClient(org1Users[2]);
+    queryHiveViewFailed(db_u0_only, u0_vw_voter_all_755);
+  }
+
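+  /**
+   * Selects from the given Hive view and expects view validation to fail because the
+   * current user cannot read the data underlying the view.
+   */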
+  private void queryHiveViewFailed(String db, String viewName) throws Exception {
+    errorMsgTestHelper(
+        String.format("SELECT * FROM hive.%s.%s LIMIT 2", db, viewName),
+        "Failure validating a view your query is dependent upon.");
+  }
+
+  /**
+   * user0 is the 750 owner
+   */
+  @Test
+  public void user0_allowed_g_student_u0g0_750() throws Exception {
+    updateClient(org1Users[0]);
+    queryHiveTableOrView(db_general, g_student_u0g0_750);
+  }
+
+  /**
+   * Table is owned by user2 and group2,
+   * but user0 can access it because others are allowed to read and execute.
+   */
+  @Test
+  public void user0_allowed_g_student_all_755() throws Exception {
+    updateClient(org1Users[0]);
+    queryHiveTableOrView(db_general, g_student_all_755);
+  }
+
+  /**
+   * user0 can't access because user1 is the 700 owner
+   */
+  @Test
+  public void user0_forbidden_g_voter_u1_700() throws Exception {
+    updateClient(org1Users[0]);
+    queryTableNotFound(db_general, g_voter_u1_700);
+  }
+
+  /**
+   * user0 can't access because only user2 and group1 members can read it
+   */
+  @Test
+  public void user0_forbidden_g_voter_u2g1_750() throws Exception {
+    updateClient(org1Users[0]);
+    queryTableNotFound(db_general, g_voter_u2g1_750);
+  }
+
+  /**
+   * user0 is allowed because others have r-x access,
+   * despite user1 and group1 ownership of the table.
+   */
+  @Test
+  public void user0_allowed_g_voter_all_755() throws Exception {
+    updateClient(org1Users[0]);
+    queryHiveTableOrView(db_general, g_voter_all_755);
+  }
 
-    showTablesHelper(db_u0_only, Collections.<String>emptyList());
+  /**
+   * user0 is 755 owner
+   */
+  @Test
+  public void user0_allowed_u0_student_all_755() throws Exception {
+    updateClient(org1Users[0]);
+    queryHiveTableOrView(db_u0_only, u0_student_all_755);
   }
 
+  /**
+   * user0 is 755 owner
+   */
   @Test
-  public void fromInfoSchemaUser1() throws Exception {
+  public void user0_allowed_u0_voter_all_755() throws Exception {
+    updateClient(org1Users[0]);
+    queryHiveTableOrView(db_u0_only, u0_voter_all_755);
+  }
+
+  /**
+   * user0 is 700 owner
+   */
+  @Test
+  public void user0_allowed_g_partitioned_student_u0_700() throws Exception {
+    updateClient(org1Users[0]);
+    queryHiveTableOrView(db_general, g_partitioned_student_u0_700);
+  }
+
+  /**
+   * user0 doesn't have access to database db_u1g1_only
+   */
+  @Test
+  public void user0_forbidden_u1g1_student_all_755() throws Exception {
+    updateClient(org1Users[0]);
+    queryTableNotFound(db_u1g1_only, u1g1_student_all_755);
+  }
+
+  @Test
+  public void user0_allowed_v_student_u0g0_750() throws Exception {
+    updateClient(org1Users[0]);
+    queryView(v_student_u0g0_750);
+  }
+
+  @Test
+  public void user0_forbidden_v_student_u1g1_750() throws Exception {
+    updateClient(org1Users[0]);
+    queryViewNotAuthorized(v_student_u1g1_750);
+  }
+
+  @Test
+  public void user0_allowed_v_partitioned_student_u0g0_750() throws Exception {
+    updateClient(org1Users[0]);
+    queryView(v_partitioned_student_u0g0_750);
+  }
+
+  @Test
+  public void user0_forbidden_v_partitioned_student_u1g1_750() throws Exception {
+    updateClient(org1Users[0]);
+    queryViewNotAuthorized(v_partitioned_student_u1g1_750);
+  }
+
+  @Test
+  public void user1_db_general_showTables() throws Exception {
     updateClient(org1Users[1]);
+    showTablesHelper(db_general, ImmutableList.of(
+        g_student_u0g0_750,
+        g_student_all_755,
+        g_voter_u1_700,
+        g_voter_u2g1_750,
+        g_voter_all_755,
+        g_vw_g_student_u0_700
+    ));
+  }
 
-    fromInfoSchemaHelper(
-        hivePluginName,
-        db_general,
+  @Test
+  public void user1_db_u1g1_only_showTables() throws Exception {
+    updateClient(org1Users[1]);
+    showTablesHelper(db_u1g1_only, ImmutableList.of(
+        u1g1_student_all_755,
+        u1g1_student_u1_700,
+        u1g1_voter_all_755,
+        u1g1_voter_u1_700
+    ));
+  }
+
+  @Test
+  public void user1_db_u0_only_showTables() throws Exception {
+    updateClient(org1Users[1]);
+    showTablesHelper(db_u0_only, newArrayList(u0_vw_voter_all_755));
+  }
+
+  @Test
+  public void user1_db_general_infoSchema() throws Exception {
+    updateClient(org1Users[1]);
+    fromInfoSchemaHelper(db_general,
         ImmutableList.of(
             g_student_u0g0_750,
             g_student_all_755,
             g_voter_u1_700,
             g_voter_u2g1_750,
-            g_voter_all_755
+            g_voter_all_755,
+            g_vw_g_student_u0_700
         ),
         ImmutableList.of(
             TableType.TABLE,
             TableType.TABLE,
             TableType.TABLE,
             TableType.TABLE,
-            TableType.TABLE
+            TableType.TABLE,
+            TableType.VIEW
         ));
+  }
 
-    fromInfoSchemaHelper(
-        hivePluginName,
-        db_u1g1_only,
+  @Test
+  public void user1_db_u1g1_only_infoSchema() throws Exception {
+    updateClient(org1Users[1]);
+    fromInfoSchemaHelper(db_u1g1_only,
         ImmutableList.of(
             u1g1_student_all_755,
             u1g1_student_u1_700,
@@ -380,198 +630,270 @@ public class TestStorageBasedHiveAuthorization extends BaseTestHiveImpersonation
             TableType.TABLE,
             TableType.TABLE
         ));
-
-    fromInfoSchemaHelper(
-        hivePluginName,
-        db_u0_only,
-        Collections.<String>emptyList(),
-        Collections.<TableType>emptyList());
   }
 
   @Test
-  public void showTablesUser2() throws Exception {
-    updateClient(org1Users[2]);
-
-    showTablesHelper(db_general,
-        ImmutableList.of(
-            g_student_all_755,
-            g_voter_u2g1_750,
-            g_voter_all_755
-        ));
+  public void user1_db_u0_only_infoSchema() throws Exception {
+    updateClient(org1Users[1]);
+    fromInfoSchemaHelper(db_u0_only,
+        newArrayList(u0_vw_voter_all_755), newArrayList(TableType.VIEW));
+  }
 
-    showTablesHelper(db_u1g1_only,
-        ImmutableList.of(
-            u1g1_student_all_755,
-            u1g1_voter_all_755
-        ));
+  /**
+   * user1 can't access because user0 is the 700 owner
+   */
+  @Test
+  public void user1_forbidden_g_student_u0_700() throws Exception {
+    updateClient(org1Users[1]);
+    queryTableNotFound(db_general, g_student_u0_700);
+  }
 
-    showTablesHelper(db_u0_only, Collections.<String>emptyList());
+  /**
+   * user1 is allowed because he's a member of group0
+   */
+  @Test
+  public void user1_allowed_g_student_u0g0_750() throws Exception {
+    updateClient(org1Users[1]);
+    queryHiveTableOrView(db_general, g_student_u0g0_750);
   }
 
+  /**
+   * user1 is allowed because others have r-x access
+   */
   @Test
-  public void fromInfoSchemaUser2() throws Exception {
-    updateClient(org1Users[2]);
+  public void user1_allowed_g_student_all_755() throws Exception {
+    updateClient(org1Users[1]);
+    queryHiveTableOrView(db_general, g_student_all_755);
+  }
 
-    fromInfoSchemaHelper(
-        hivePluginName,
-        db_general,
-        ImmutableList.of(
-            g_student_all_755,
-            g_voter_u2g1_750,
-            g_voter_all_755
-        ),
-        ImmutableList.of(
-            TableType.TABLE,
-            TableType.TABLE,
-            TableType.TABLE
-        ));
+  /**
+   * user1 is 700 owner
+   */
+  @Test
+  public void user1_allowed_g_voter_u1_700() throws Exception {
+    updateClient(org1Users[1]);
+    queryHiveTableOrView(db_general, g_voter_u1_700);
+  }
 
-    fromInfoSchemaHelper(
-        hivePluginName,
-        db_u1g1_only,
-        ImmutableList.of(
-            u1g1_student_all_755,
-            u1g1_voter_all_755
-        ),
-        ImmutableList.of(
-            TableType.TABLE,
-            TableType.TABLE
-        ));
+  /**
+   * user1 is allowed because he's a member of group1
+   */
+  @Test
+  public void user1_allowed_g_voter_u2g1_750() throws Exception {
+    updateClient(org1Users[1]);
+    queryHiveTableOrView(db_general, g_voter_u2g1_750);
+  }
 
-    fromInfoSchemaHelper(
-        hivePluginName,
-        db_u0_only,
-        Collections.<String>emptyList(),
-        Collections.<TableType>emptyList());
+  /**
+   * user1 is 755 owner
+   */
+  @Test
+  public void user1_allowed_g_voter_all_755() throws Exception {
+    updateClient(org1Users[1]);
+    queryHiveTableOrView(db_general, g_voter_all_755);
   }
 
-  // Try to read the tables "user0" has access to read in db_general.
+  /**
+   * access is restricted at the db level; only user0 can access db_u0_only
+   */
   @Test
-  public void selectUser0_db_general() throws Exception {
-    updateClient(org1Users[0]);
+  public void user1_forbidden_u0_student_all_755() throws Exception {
+    updateClient(org1Users[1]);
+    queryTableNotFound(db_u0_only, u0_student_all_755);
+  }
 
-    test(String.format("SELECT * FROM hive.%s.%s ORDER BY gpa DESC LIMIT 2", db_general, g_student_u0_700));
-    test(String.format("SELECT * FROM hive.%s.%s ORDER BY gpa DESC LIMIT 2", db_general, g_student_all_755));
-    test(String.format("SELECT * FROM hive.%s.%s ORDER BY name DESC LIMIT 2", db_general, g_voter_all_755));
+  /**
+   * access is restricted at the db level; only user0 can access db_u0_only
+   */
+  @Test
+  public void user1_forbidden_u0_voter_all_755() throws Exception {
+    updateClient(org1Users[1]);
+    queryTableNotFound(db_u0_only, u0_voter_all_755);
+  }
 
-    test(String.format("SELECT * FROM hive.%s.%s ORDER BY gpa DESC LIMIT 2", db_general, g_partitioned_student_u0_700));
+  @Test
+  public void user1_allowed_v_student_u0g0_750() throws Exception {
+    updateClient(org1Users[1]);
+    queryView(v_student_u0g0_750);
   }
 
-  // Try to read the table that "user0" has access to read in db_u0_only
   @Test
-  public void selectUser0_db_u0_only() throws Exception {
-    updateClient(org1Users[0]);
+  public void user1_allowed_v_student_u1g1_750() throws Exception {
+    updateClient(org1Users[1]);
+    queryView(v_student_u1g1_750);
+  }
 
-    test(String.format("SELECT * FROM hive.%s.%s ORDER BY gpa DESC LIMIT 2", db_u0_only, u0_student_all_755));
-    test(String.format("SELECT * FROM hive.%s.%s ORDER BY name DESC LIMIT 2", db_u0_only, u0_voter_all_755));
+  @Test
+  public void user1_allowed_v_partitioned_student_u0g0_750() throws Exception {
+    updateClient(org1Users[1]);
+    queryView(v_partitioned_student_u0g0_750);
   }
 
-  // Try to read the tables "user0" has no access to read in db_u1g1_only
   @Test
-  public void selectUser0_db_u1g1_only() throws Exception {
-    updateClient(org1Users[0]);
+  public void user1_allowed_v_partitioned_student_u1g1_750() throws Exception {
+    updateClient(org1Users[1]);
+    queryView(v_partitioned_student_u1g1_750);
+  }
 
-    errorMsgTestHelper(
-        String.format("SELECT * FROM hive.%s.%s ORDER BY gpa DESC LIMIT 2", db_u1g1_only, u1g1_student_all_755),
-        String.format("Object '%s' not found within 'hive.%s'", u1g1_student_all_755, db_u1g1_only));
+  @Test
+  public void user2_db_general_showTables() throws Exception {
+    updateClient(org1Users[2]);
+    showTablesHelper(db_general, ImmutableList.of(
+        g_student_all_755,
+        g_voter_u2g1_750,
+        g_voter_all_755,
+        g_vw_g_student_u0_700
+    ));
   }
 
-  // Try to read the tables "user1" has access to read in db_general.
   @Test
-  public void selectUser1_db_general() throws Exception {
-    updateClient(org1Users[1]);
+  public void user2_db_u1g1_only_showTables() throws Exception {
+    updateClient(org1Users[2]);
+    showTablesHelper(db_u1g1_only, ImmutableList.of(
+        u1g1_student_all_755,
+        u1g1_voter_all_755
+    ));
+  }
 
-    test(String.format("SELECT * FROM hive.%s.%s ORDER BY gpa DESC LIMIT 2", db_general, g_student_u0g0_750));
-    test(String.format("SELECT * FROM hive.%s.%s ORDER BY gpa DESC LIMIT 2", db_general, g_student_all_755));
-    test(String.format("SELECT * FROM hive.%s.%s ORDER BY name DESC LIMIT 2", db_general, g_voter_u2g1_750));
+  @Test
+  public void user2_db_u0_only_showTables() throws Exception {
+    updateClient(org1Users[2]);
+    showTablesHelper(db_u0_only, newArrayList(u0_vw_voter_all_755));
   }
 
-  // Try to read the tables "user1" has no access to read in db_u0_only
   @Test
-  public void selectUser1_db_u0_only() throws Exception {
-    updateClient(org1Users[1]);
+  public void user2_db_general_infoSchema() throws Exception {
+    updateClient(org1Users[2]);
+    fromInfoSchemaHelper(db_general,
+        ImmutableList.of(
+            g_student_all_755,
+            g_voter_u2g1_750,
+            g_voter_all_755,
+            g_vw_g_student_u0_700
+        ),
+        ImmutableList.of(
+            TableType.TABLE,
+            TableType.TABLE,
+            TableType.TABLE,
+            TableType.VIEW
+        ));
+  }
 
-    errorMsgTestHelper(
-        String.format("SELECT * FROM hive.%s.%s ORDER BY gpa DESC LIMIT 2", db_u0_only, u0_student_all_755),
-        String.format("Object '%s' not found within 'hive.%s'", u0_student_all_755, db_u0_only));
+  @Test
+  public void user2_db_u1g1_only_infoSchema() throws Exception {
+    updateClient(org1Users[2]);
+    fromInfoSchemaHelper(db_u1g1_only,
+        ImmutableList.of(
+            u1g1_student_all_755,
+            u1g1_voter_all_755
+        ),
+        ImmutableList.of(
+            TableType.TABLE,
+            TableType.TABLE
+        ));
   }
 
-  private static void queryViewHelper(final String queryUser, final String query) throws Exception {
-    updateClient(queryUser);
-    testBuilder()
-        .sqlQuery(query)
-        .unOrdered()
-        .baselineColumns("rownum")
-        .baselineValues(1)
-        .go();
+  @Test
+  public void user2_db_u0_only_infoSchema() throws Exception {
+    updateClient(org1Users[2]);
+    fromInfoSchemaHelper(db_u0_only, newArrayList(u0_vw_voter_all_755),
+        newArrayList(TableType.VIEW));
   }
 
+  /**
+   * user2 can't access because user0 is the 700 owner
+   */
   @Test
-  public void selectUser0_v_student_u0g0_750() throws Exception {
-    queryViewHelper(org1Users[0], query_v_student_u0g0_750);
+  public void user2_forbidden_g_student_u0_700() throws Exception {
+    updateClient(org1Users[2]);
+    queryTableNotFound(db_general, g_student_u0_700);
   }
 
+  /**
+   * user2 can't access; only user0 and group0 members have access
+   */
   @Test
-  public void selectUser1_v_student_u0g0_750() throws Exception {
-    queryViewHelper(org1Users[1], query_v_student_u0g0_750);
+  public void user2_forbidden_g_student_u0g0_750() throws Exception {
+    updateClient(org1Users[2]);
+    queryTableNotFound(db_general, g_student_u0g0_750);
   }
 
+  /**
+   * user2 is 755 owner
+   */
   @Test
-  public void selectUser2_v_student_u0g0_750() throws Exception {
+  public void user2_allowed_g_student_all_755() throws Exception {
     updateClient(org1Users[2]);
-    errorMsgTestHelper(query_v_student_u0g0_750, String.format(
-        "Not authorized to read view [v_student_u0g0_750] in schema [%s.tmp]", MINI_DFS_STORAGE_PLUGIN_NAME));
+    queryHiveTableOrView(db_general, g_student_all_755);
   }
 
+  /**
+   * user2 can't access because user1 is the 700 owner
+   */
   @Test
-  public void selectUser0_v_student_u1g1_750() throws Exception {
-    updateClient(org1Users[0]);
-    errorMsgTestHelper(query_v_student_u1g1_750, String.format(
-        "Not authorized to read view [v_student_u1g1_750] in schema [%s.tmp]", MINI_DFS_STORAGE_PLUGIN_NAME));
+  public void user2_forbidden_g_voter_u1_700() throws Exception {
+    updateClient(org1Users[2]);
+    queryTableNotFound(db_general, g_voter_u1_700);
   }
 
+  /**
+   * user2 is 750 owner
+   */
   @Test
-  public void selectUser1_v_student_u1g1_750() throws Exception {
-    queryViewHelper(org1Users[1], query_v_student_u1g1_750);
+  public void user2_allowed_g_voter_u2g1_750() throws Exception {
+    updateClient(org1Users[2]);
+    queryHiveTableOrView(db_general, g_voter_u2g1_750);
   }
 
+  /**
+   * user2 is a member of group1
+   */
   @Test
-  public void selectUser2_v_student_u1g1_750() throws Exception {
-    queryViewHelper(org1Users[2], query_v_student_u1g1_750);
+  public void user2_allowed_g_voter_all_755() throws Exception {
+    updateClient(org1Users[2]);
+    queryHiveTableOrView(db_general, g_voter_all_755);
   }
 
+  /**
+   * access is restricted at the db level; only user0 can access db_u0_only
+   */
   @Test
-  public void selectUser0_v_partitioned_student_u0g0_750() throws Exception {
-    queryViewHelper(org1Users[0], query_v_partitioned_student_u0g0_750);
+  public void user2_forbidden_u0_student_all_755() throws Exception {
+    updateClient(org1Users[2]);
+    queryTableNotFound(db_u0_only, u0_student_all_755);
   }
 
+  /**
+   * access is restricted at the db level; only user0 can access db_u0_only
+   */
   @Test
-  public void selectUser1_v_partitioned_student_u0g0_750() throws Exception {
-    queryViewHelper(org1Users[1], query_v_partitioned_student_u0g0_750);
+  public void user2_forbidden_u0_voter_all_755() throws Exception {
+    updateClient(org1Users[2]);
+    queryTableNotFound(db_u0_only, u0_voter_all_755);
   }
 
   @Test
-  public void selectUser2_v_partitioned_student_u0g0_750() throws Exception {
+  public void user2_forbidden_v_student_u0g0_750() throws Exception {
     updateClient(org1Users[2]);
-    errorMsgTestHelper(query_v_partitioned_student_u0g0_750, String.format(
-        "Not authorized to read view [v_partitioned_student_u0g0_750] in schema [%s.tmp]", MINI_DFS_STORAGE_PLUGIN_NAME));
+    queryViewNotAuthorized(v_student_u0g0_750);
   }
 
   @Test
-  public void selectUser0_v_partitioned_student_u1g1_750() throws Exception {
-    updateClient(org1Users[0]);
-    errorMsgTestHelper(query_v_partitioned_student_u1g1_750, String.format(
-        "Not authorized to read view [v_partitioned_student_u1g1_750] in schema [%s.tmp]", MINI_DFS_STORAGE_PLUGIN_NAME));
+  public void user2_allowed_v_student_u1g1_750() throws Exception {
+    updateClient(org1Users[2]);
+    queryView(v_student_u1g1_750);
   }
 
   @Test
-  public void selectUser1_v_partitioned_student_u1g1_750() throws Exception {
-    queryViewHelper(org1Users[1], query_v_partitioned_student_u1g1_750);
+  public void user2_forbidden_v_partitioned_student_u0g0_750() throws Exception {
+    updateClient(org1Users[2]);
+    queryViewNotAuthorized(v_partitioned_student_u0g0_750);
   }
 
   @Test
-  public void selectUser2_v_partitioned_student_u1g1_750() throws Exception {
-    queryViewHelper(org1Users[2], query_v_partitioned_student_u1g1_750);
+  public void user2_allowed_v_partitioned_student_u1g1_750() throws Exception {
+    updateClient(org1Users[2]);
+    queryView(v_partitioned_student_u1g1_750);
   }
 
   @AfterClass
@@ -579,4 +901,15 @@ public class TestStorageBasedHiveAuthorization extends BaseTestHiveImpersonation
     stopMiniDfsCluster();
     stopHiveMetaStore();
   }
+
+  private static void queryHiveTableOrView(String db, String table) throws Exception {
+    test(String.format("SELECT * FROM hive.%s.%s LIMIT 2", db, table));
+  }
+
+  private static void queryTableNotFound(String db, String table) throws Exception {
+    errorMsgTestHelper(
+        String.format("SELECT * FROM hive.%s.%s LIMIT 2", db, table),
+        String.format("Object '%s' not found within 'hive.%s'", table, db));
+  }
+
 }
diff --git a/contrib/storage-hive/core/src/test/java/org/apache/drill/exec/sql/hive/TestViewSupportOnHiveTables.java b/contrib/storage-hive/core/src/test/java/org/apache/drill/exec/sql/hive/TestViewSupportOnHiveTables.java
index eed3fae..821200d 100644
--- a/contrib/storage-hive/core/src/test/java/org/apache/drill/exec/sql/hive/TestViewSupportOnHiveTables.java
+++ b/contrib/storage-hive/core/src/test/java/org/apache/drill/exec/sql/hive/TestViewSupportOnHiveTables.java
@@ -103,10 +103,10 @@ public class TestViewSupportOnHiveTables extends TestBaseViewSupport {
   public void testInfoSchemaWithHiveView() throws Exception {
     testBuilder()
         .optionSettingQueriesForTestQuery("USE hive.`default`")
-        .sqlQuery("SELECT * FROM INFORMATION_SCHEMA.VIEWS WHERE TABLE_NAME = 'hiveview'")
+        .sqlQuery("SELECT * FROM INFORMATION_SCHEMA.VIEWS WHERE TABLE_NAME = 'hive_view'")
         .unOrdered()
         .baselineColumns("TABLE_CATALOG", "TABLE_SCHEMA", "TABLE_NAME", "VIEW_DEFINITION")
-        .baselineValues("DRILL", "hive.default", "hiveview", "SELECT `kv`.`key`, `kv`.`value` FROM `default`.`kv`")
+        .baselineValues("DRILL", "hive.default", "hive_view", "SELECT `kv`.`key`, `kv`.`value` FROM `default`.`kv`")
         .go();
   }
 
@@ -116,4 +116,5 @@ public class TestViewSupportOnHiveTables extends TestBaseViewSupport {
       hiveTest.deleteHiveTestPlugin(getDrillbitContext().getStorage());
     }
   }
+
 }
diff --git a/contrib/storage-hive/core/src/test/java/org/apache/drill/exec/store/hive/HiveTestDataGenerator.java b/contrib/storage-hive/core/src/test/java/org/apache/drill/exec/store/hive/HiveTestDataGenerator.java
index 84fa368..0b9cd36 100644
--- a/contrib/storage-hive/core/src/test/java/org/apache/drill/exec/store/hive/HiveTestDataGenerator.java
+++ b/contrib/storage-hive/core/src/test/java/org/apache/drill/exec/store/hive/HiveTestDataGenerator.java
@@ -459,9 +459,6 @@ public class HiveTestDataGenerator {
             "  char_part='char')"
     );
 
-    // create a Hive view to test how its metadata is populated in Drill's INFORMATION_SCHEMA
-    executeQuery(hiveDriver, "CREATE VIEW IF NOT EXISTS hiveview AS SELECT * FROM kv");
-
     executeQuery(hiveDriver, "CREATE TABLE IF NOT EXISTS " +
         "partition_pruning_test_loadtable(a DATE, b TIMESTAMP, c INT, d INT, e INT) " +
         "ROW FORMAT DELIMITED FIELDS TERMINATED BY ',' STORED AS TEXTFILE");
@@ -547,6 +544,14 @@ public class HiveTestDataGenerator {
 
     createSubDirTable(hiveDriver, testDataFile);
 
+    // hive views
+    executeQuery(hiveDriver, "CREATE OR REPLACE VIEW readtest_view AS SELECT * FROM readtest");
+    executeQuery(hiveDriver, "CREATE VIEW IF NOT EXISTS hive_view AS SELECT * FROM kv");
+    executeQuery(hiveDriver, "CREATE OR REPLACE VIEW kv_native_view AS SELECT * FROM kv_native");
+    executeQuery(hiveDriver, "CREATE MATERIALIZED VIEW IF NOT EXISTS hive_view_m AS SELECT * FROM kv WHERE key = 1");
+    executeQuery(hiveDriver, "CREATE OR REPLACE VIEW view_over_hive_view AS SELECT * FROM hive_view WHERE key BETWEEN 2 AND 3");
+    executeQuery(hiveDriver, "CREATE OR REPLACE VIEW db1.two_table_view AS SELECT COUNT(dk.key) dk_key_count FROM db1.avro dk " +
+        "INNER JOIN kv ON kv.key = dk.key");
     ss.close();
   }
 
@@ -696,4 +701,5 @@ public class HiveTestDataGenerator {
 
     return sb.toString();
   }
-}
\ No newline at end of file
+
+}
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/logical/DrillViewTable.java b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/logical/DrillViewTable.java
index 2eb9137..a90a135 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/logical/DrillViewTable.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/logical/DrillViewTable.java
@@ -17,6 +17,9 @@
  */
 package org.apache.drill.exec.planner.logical;
 
+import java.util.List;
+
+import org.apache.calcite.schema.SchemaPlus;
 import org.apache.drill.shaded.guava.com.google.common.collect.ImmutableList;
 import org.apache.calcite.config.CalciteConnectionConfig;
 import org.apache.calcite.schema.Schema.TableType;
@@ -67,7 +70,7 @@ public class DrillViewTable implements TranslatableTable, DrillViewInfoProvider
 
       if (viewExpansionContext.isImpersonationEnabled()) {
         token = viewExpansionContext.reserveViewExpansionToken(viewOwner);
-        rel = context.expandView(rowType, view.getSql(), token.getSchemaTree(), view.getWorkspaceSchemaPath()).rel;
+        rel = expandViewForImpersonatedUser(context, rowType, view.getWorkspaceSchemaPath(), token.getSchemaTree());
       } else {
         rel = context.expandView(rowType, view.getSql(), view.getWorkspaceSchemaPath(), ImmutableList.<String>of()).rel;
       }
@@ -85,6 +88,14 @@ public class DrillViewTable implements TranslatableTable, DrillViewInfoProvider
     }
   }
 
+
+  protected RelNode expandViewForImpersonatedUser(ToRelContext context,
+                                                  RelDataType rowType,
+                                                  List<String> workspaceSchemaPath,
+                                                  SchemaPlus tokenSchemaTree) {
+    return context.expandView(rowType, view.getSql(), tokenSchemaTree, workspaceSchemaPath).rel;
+  }
+
   @Override
   public TableType getJdbcTableType() {
     return TableType.VIEW;
@@ -104,4 +115,5 @@ public class DrillViewTable implements TranslatableTable, DrillViewInfoProvider
   @Override public boolean isRolledUp(String column) {
     return false;
   }
+
 }
diff --git a/logical/pom.xml b/logical/pom.xml
index 468dc17..caa126c 100644
--- a/logical/pom.xml
+++ b/logical/pom.xml
@@ -104,9 +104,6 @@
       </plugin>
       <plugin>
         <artifactId>maven-surefire-plugin</artifactId>
-        <configuration>
-          <useSystemClassLoader>false</useSystemClassLoader>
-        </configuration>
       </plugin>
       <plugin>
         <groupId>org.antlr</groupId>


[drill] 08/10: DRILL-6888: Move nested classes outside HashAggTemplate to allow for plain java compile option closes #1569

Posted by gp...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

gparai pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/drill.git

commit 10b105953fc2cdee1bee8970ecbaeca285d6bb2d
Author: Ben-Zvi <bb...@mapr.com>
AuthorDate: Mon Dec 10 13:52:53 2018 -0800

    DRILL-6888: Move nested classes outside HashAggTemplate to allow for plain java compile option
    closes #1569
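
The essence of this refactoring is visible in the diff below: HashAggUpdater changes from a non-static inner class, which implicitly read the enclosing template's allocator, into a top-level class that receives the BufferAllocator through its constructor. A generic sketch of that pattern follows; the names Outer and Helper are illustrative only and not part of the patch.

import org.apache.drill.exec.memory.BufferAllocator;

// Top-level helper with its dependency injected explicitly, instead of an inner
// class that captures the enclosing template instance (which, per this commit,
// gets in the way of the plain-java compile option for templates).
public class Helper {
  private final BufferAllocator allocator;

  public Helper(BufferAllocator allocator) {
    this.allocator = allocator;
  }

  public long memLimit() {
    return allocator.getLimit();   // same call the real HashAggUpdater makes
  }
}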
---
 .../impl/aggregate/HashAggSpilledPartition.java    | 51 +++++++++++++++++++
 .../physical/impl/aggregate/HashAggTemplate.java   | 58 +---------------------
 .../physical/impl/aggregate/HashAggUpdater.java    | 44 ++++++++++++++++
 3 files changed, 97 insertions(+), 56 deletions(-)

diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/aggregate/HashAggSpilledPartition.java b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/aggregate/HashAggSpilledPartition.java
new file mode 100644
index 0000000..e5e82e0
--- /dev/null
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/aggregate/HashAggSpilledPartition.java
@@ -0,0 +1,51 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.drill.exec.physical.impl.aggregate;
+
+import org.apache.drill.exec.physical.impl.common.AbstractSpilledPartitionMetadata;
+import org.apache.drill.shaded.guava.com.google.common.base.Preconditions;
+
+public class HashAggSpilledPartition extends AbstractSpilledPartitionMetadata {
+  private final int spilledBatches;
+  private final String spillFile;
+
+  public HashAggSpilledPartition(final int cycle,
+                                 final int originPartition,
+                                 final int prevOriginPartition,
+                                 final int spilledBatches,
+                                 final String spillFile) {
+    super(cycle, originPartition, prevOriginPartition);
+
+    this.spilledBatches = spilledBatches;
+    this.spillFile = Preconditions.checkNotNull(spillFile);
+  }
+
+  public int getSpilledBatches() {
+    return spilledBatches;
+  }
+
+  public String getSpillFile() {
+    return spillFile;
+  }
+
+  @Override
+  public String makeDebugString() {
+    return String.format("Start reading spilled partition %d (prev %d) from cycle %d.",
+      this.getOriginPartition(), this.getPrevOriginPartition(), this.getCycle());
+  }
+}
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/aggregate/HashAggTemplate.java b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/aggregate/HashAggTemplate.java
index d10a84a..2f50dd6 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/aggregate/HashAggTemplate.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/aggregate/HashAggTemplate.java
@@ -49,7 +49,6 @@ import org.apache.drill.exec.ops.OperatorContext;
 import org.apache.drill.exec.ops.OperatorStats;
 import org.apache.drill.exec.physical.base.AbstractBase;
 import org.apache.drill.exec.physical.config.HashAggregate;
-import org.apache.drill.exec.physical.impl.common.AbstractSpilledPartitionMetadata;
 import org.apache.drill.exec.physical.impl.common.ChainedHashTable;
 import org.apache.drill.exec.physical.impl.common.CodeGenMemberInjector;
 import org.apache.drill.exec.physical.impl.common.HashTable;
@@ -84,7 +83,6 @@ import org.apache.drill.exec.vector.ObjectVector;
 import org.apache.drill.exec.vector.ValueVector;
 
 import org.apache.drill.exec.vector.VariableWidthVector;
-import org.apache.drill.shaded.guava.com.google.common.base.Preconditions;
 
 import static org.apache.drill.exec.physical.impl.common.HashTable.BATCH_MASK;
 import static org.apache.drill.exec.record.RecordBatch.MAX_BATCH_ROW_COUNT;
@@ -149,7 +147,7 @@ public abstract class HashAggTemplate implements HashAggregator {
   private int outBatchIndex[];
 
   // For handling spilling
-  private HashAggUpdater updater = new HashAggUpdater();
+  private HashAggUpdater updater;
   private SpilledState<HashAggSpilledPartition> spilledState = new SpilledState<>();
   private SpillSet spillSet;
   SpilledRecordbatch newIncoming; // when reading a spilled file - work like an "incoming"
@@ -171,59 +169,6 @@ public abstract class HashAggTemplate implements HashAggregator {
   private OperatorStats stats = null;
   private HashTableStats htStats = new HashTableStats();
 
-  public static class HashAggSpilledPartition extends AbstractSpilledPartitionMetadata {
-    private final int spilledBatches;
-    private final String spillFile;
-
-    public HashAggSpilledPartition(final int cycle,
-                                   final int originPartition,
-                                   final int prevOriginPartition,
-                                   final int spilledBatches,
-                                   final String spillFile) {
-      super(cycle, originPartition, prevOriginPartition);
-
-      this.spilledBatches = spilledBatches;
-      this.spillFile = Preconditions.checkNotNull(spillFile);
-    }
-
-    public int getSpilledBatches() {
-      return spilledBatches;
-    }
-
-    public String getSpillFile() {
-      return spillFile;
-    }
-
-    @Override
-    public String makeDebugString() {
-      return String.format("Start reading spilled partition %d (prev %d) from cycle %d.",
-        this.getOriginPartition(), this.getPrevOriginPartition(), this.getCycle());
-    }
-  }
-
-  public class HashAggUpdater implements SpilledState.Updater {
-
-    @Override
-    public void cleanup() {
-      this.cleanup();
-    }
-
-    @Override
-    public String getFailureMessage() {
-      return null;
-    }
-
-    @Override
-    public long getMemLimit() {
-      return allocator.getLimit();
-    }
-
-    @Override
-    public boolean hasPartitionLimit() {
-      return false;
-    }
-  }
-
   public enum Metric implements MetricDef {
 
     NUM_BUCKETS,
@@ -375,6 +320,7 @@ public abstract class HashAggTemplate implements HashAggregator {
     this.context = context;
     this.stats = oContext.getStats();
     this.allocator = oContext.getAllocator();
+    this.updater = new HashAggUpdater(allocator);
     this.oContext = oContext;
     this.incoming = incoming;
     this.outgoing = outgoing;
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/aggregate/HashAggUpdater.java b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/aggregate/HashAggUpdater.java
new file mode 100644
index 0000000..3ee26eb
--- /dev/null
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/aggregate/HashAggUpdater.java
@@ -0,0 +1,44 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.drill.exec.physical.impl.aggregate;
+
+import org.apache.drill.exec.memory.BufferAllocator;
+import org.apache.drill.exec.physical.impl.common.SpilledState;
+
+public class HashAggUpdater implements SpilledState.Updater {
+  private final BufferAllocator allocator;
+  public HashAggUpdater(BufferAllocator allocator) { this.allocator = allocator; }
+
+  @Override
+  public void cleanup() { }
+
+  @Override
+  public String getFailureMessage() {
+    return null;
+  }
+
+  @Override
+  public long getMemLimit() {
+    return allocator.getLimit();
+  }
+
+  @Override
+  public boolean hasPartitionLimit() {
+    return false;
+  }
+}


[drill] 09/10: DRILL-6879: Show warnings for potential performance issues

Posted by gp...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

gparai pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/drill.git

commit e65079a5d6f9b4e54783bef9f3af64a0684af3f0
Author: Kunal Khatua <kk...@maprtech.com>
AuthorDate: Tue Dec 18 10:33:07 2018 -0800

    DRILL-6879: Show warnings for potential performance issues
    
    1. Introduced a warning for non-progressive fragments. Based on a threshold (`drill.exec.http.profile.warning.progress.threshold`), a warning is issued if no fragment has made progress within that time. The default is 5 minutes (300 sec).
    
    2. Introduced a warning if any of the buffered operators spill to disk.
    
    3. Introduced a warning for operators where the longest-running fragment runs beyond a minimum threshold (drill.exec.http.profile.warning.time.skew.min) and at least 2 times longer than the average (drill.exec.http.profile.warning.time.skew.ratio.process). A clock symbol with a tooltip indicates the extent of the skew. For wait times, the ratio is defined by `drill.exec.http.profile.warning.time.skew.ratio.wait`. (A sketch of this check follows the commit message below.)
    
    4. Introduced a warning for scan operators whose average wait time exceeds their processing time, subject to a minimum threshold (drill.exec.http.profile.warning.scan.wait.min). A turtle symbol with a tooltip indicates which scan operator spent more time waiting than processing.
    
    5. Refactored TableBuilder:
     a. Uses an attribute map instead of String arguments, e.g. for 'title'
     b. Removed APIs that pass a hyperlink since they were never used.
    closes #1572
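
For reference, the skew detection described in item 3 reduces to two checks: the operator's average processing time must exceed a minimum number of seconds, and the slowest fragment must exceed that average by more than the configured ratio. The snippet below is a minimal standalone sketch of that logic, mirroring the computation added to OperatorWrapper in this patch; the class and method names (TimeSkewCheckSketch, isTimeSkewed) and the example values are illustrative only, not part of the patch.

import java.util.concurrent.TimeUnit;

public class TimeSkewCheckSketch {

  /**
   * @param avgProcNanos   average per-fragment processing time of the operator
   * @param maxProcNanos   processing time of the slowest fragment
   * @param timeSkewMinSec minimum average time (seconds) before the check applies
   * @param timeSkewRatio  how many times slower than average the slowest fragment must be
   * @return true if the operator should carry the time-skew warning
   */
  static boolean isTimeSkewed(long avgProcNanos, long maxProcNanos,
                              int timeSkewMinSec, double timeSkewRatio) {
    double maxSkew = avgProcNanos > 0 ? maxProcNanos / (double) avgProcNanos : 0.0;
    return avgProcNanos > TimeUnit.SECONDS.toNanos(timeSkewMinSec) && maxSkew > timeSkewRatio;
  }

  public static void main(String[] args) {
    // Example: average 10s, slowest fragment 25s, min threshold 5s, ratio 2.0 -> warning
    long avg = TimeUnit.SECONDS.toNanos(10);
    long max = TimeUnit.SECONDS.toNanos(25);
    System.out.println(isTimeSkewed(avg, max, 5, 2.0)); // prints "true"
  }
}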
---
 .../java/org/apache/drill/exec/ExecConstants.java  |  11 ++
 .../exec/server/rest/profile/FragmentWrapper.java  |  39 ++++--
 .../exec/server/rest/profile/HtmlAttribute.java    |  37 ++++++
 .../exec/server/rest/profile/OperatorWrapper.java  |  94 ++++++++++----
 .../exec/server/rest/profile/ProfileResources.java |   4 +-
 .../exec/server/rest/profile/ProfileWrapper.java   |  16 ++-
 .../exec/server/rest/profile/TableBuilder.java     | 140 ++++++++-------------
 .../java-exec/src/main/resources/drill-module.conf |  11 ++
 .../src/main/resources/rest/profile/profile.ftl    | 113 +++++++++++++----
 .../src/main/resources/rest/static/img/turtle.png  | Bin 0 -> 469 bytes
 10 files changed, 313 insertions(+), 152 deletions(-)

diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/ExecConstants.java b/exec/java-exec/src/main/java/org/apache/drill/exec/ExecConstants.java
index 77cfb9f..ceae237 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/ExecConstants.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/ExecConstants.java
@@ -785,6 +785,17 @@ public final class ExecConstants {
   public static final BooleanValidator DYNAMIC_UDF_SUPPORT_ENABLED_VALIDATOR = new BooleanValidator(DYNAMIC_UDF_SUPPORT_ENABLED,
       new OptionDescription("Enables users to dynamically upload UDFs. Users must upload their UDF (source and binary) JAR files to a staging directory in the distributed file system before issuing the CREATE FUNCTION USING JAR command to register a UDF. Default is true. (Drill 1.9+)"));
 
+  //Trigger warning in UX if fragments appear to be doing no work (units are in seconds).
+  public static final String PROFILE_WARNING_PROGRESS_THRESHOLD = "drill.exec.http.profile.warning.progress.threshold";
+  //Trigger warning in UX if slowest fragment operator crosses min threshold and exceeds ratio with average (units are in seconds).
+  public static final String PROFILE_WARNING_TIME_SKEW_MIN = "drill.exec.http.profile.warning.time.skew.min";
+  //Threshold Ratio for Processing (i.e. "maxProcessing : avgProcessing" ratio must exceed this defined threshold to show a skew warning)
+  public static final String PROFILE_WARNING_TIME_SKEW_RATIO_PROCESS = "drill.exec.http.profile.warning.time.skew.ratio.process";
+  //Trigger warning in UX if slowest fragment SCAN crosses min threshold and exceeds ratio with average (units are in seconds).
+  public static final String PROFILE_WARNING_SCAN_WAIT_MIN = "drill.exec.http.profile.warning.scan.wait.min";
+  //Threshold Ratio for Waiting (i.e. "maxWait : avgWait" ratio must exceed this defined threshold to show a skew warning)
+  public static final String PROFILE_WARNING_TIME_SKEW_RATIO_WAIT = "drill.exec.http.profile.warning.time.skew.ratio.wait";
+
   /**
    * Option to save query profiles. If false, no query profile will be saved
    * for any query.
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/server/rest/profile/FragmentWrapper.java b/exec/java-exec/src/main/java/org/apache/drill/exec/server/rest/profile/FragmentWrapper.java
index d8c5ecb..aead9e0 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/server/rest/profile/FragmentWrapper.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/server/rest/profile/FragmentWrapper.java
@@ -22,12 +22,14 @@ import java.util.Collections;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
+import java.util.concurrent.TimeUnit;
 
+import org.apache.drill.common.config.DrillConfig;
+import org.apache.drill.exec.ExecConstants;
 import org.apache.drill.exec.proto.UserBitShared.MajorFragmentProfile;
 import org.apache.drill.exec.proto.UserBitShared.MinorFragmentProfile;
 import org.apache.drill.exec.proto.UserBitShared.OperatorProfile;
 import org.apache.drill.exec.proto.UserBitShared.StreamProfile;
-
 import org.apache.drill.shaded.guava.com.google.common.base.Preconditions;
 import org.apache.drill.shaded.guava.com.google.common.collect.Collections2;
 
@@ -37,10 +39,13 @@ import org.apache.drill.shaded.guava.com.google.common.collect.Collections2;
 public class FragmentWrapper {
   private final MajorFragmentProfile major;
   private final long start;
+  private final int runningProfileProgressThreshold;
 
-  public FragmentWrapper(final MajorFragmentProfile major, final long start) {
+  public FragmentWrapper(final MajorFragmentProfile major, final long start, DrillConfig config) {
     this.major = Preconditions.checkNotNull(major);
     this.start = start;
+    //Threshold to track if query made no progress in specified elapsed time
+    runningProfileProgressThreshold = config.getInt(ExecConstants.PROFILE_WARNING_PROGRESS_THRESHOLD);
   }
 
   public String getDisplayName() {
@@ -83,7 +88,7 @@ public class FragmentWrapper {
 
     // If there are no stats to aggregate, create an empty row
     if (complete.size() < 1) {
-      tb.appendRepeated("", null, NUM_NULLABLE_ACTIVE_OVERVIEW_COLUMNS);
+      tb.appendRepeated("", NUM_NULLABLE_ACTIVE_OVERVIEW_COLUMNS);
       return;
     }
 
@@ -118,15 +123,23 @@ public class FragmentWrapper {
     tb.appendMillis(cumulativeFragmentDurationInMillis / complete.size());
     tb.appendMillis(longRun.getEndTime() - longRun.getStartTime());
 
-    tb.appendPercent(totalProcessInMillis / (totalProcessInMillis + totalWaitInMillis), null,
-        //#8721 is the summation sign: sum(Busy): ## + sum(Wait): ##
+    Map<String, String> percBusyAttrMap = new HashMap<>();
+    //#8721 is the summation sign: sum(Busy): ## + sum(Wait): ##
+    percBusyAttrMap.put(HtmlAttribute.TITLE,
         String.format("&#8721;Busy: %,.2fs + &#8721;Wait: %,.2fs", totalProcessInMillis/1E3, totalWaitInMillis/1E3));
+    tb.appendPercent(totalProcessInMillis / (totalProcessInMillis + totalWaitInMillis), percBusyAttrMap);
 
     final MinorFragmentProfile lastUpdate = Collections.max(complete, Comparators.lastUpdate);
     tb.appendMillis(System.currentTimeMillis()-lastUpdate.getLastUpdate());
 
     final MinorFragmentProfile lastProgress = Collections.max(complete, Comparators.lastProgress);
-    tb.appendMillis(System.currentTimeMillis()-lastProgress.getLastProgress());
+    long elapsedSinceLastProgress = System.currentTimeMillis()-lastProgress.getLastProgress();
+    Map<String, String> lastProgressAttrMap = null;
+    if (elapsedSinceLastProgress > TimeUnit.SECONDS.toMillis(runningProfileProgressThreshold)) {
+      lastProgressAttrMap = new HashMap<>();
+      lastProgressAttrMap.put(HtmlAttribute.CLASS, HtmlAttribute.CLASS_VALUE_NO_PROGRESS_TAG);
+    }
+    tb.appendMillis(elapsedSinceLastProgress, lastProgressAttrMap);
 
     // TODO(DRILL-3494): Names (maxMem, getMaxMemoryUsed) are misleading; the value is peak memory allocated to fragment
     final MinorFragmentProfile maxMem = Collections.max(complete, Comparators.fragmentPeakMemory);
@@ -162,7 +175,7 @@ public class FragmentWrapper {
 
     // If there are no stats to aggregate, create an empty row
     if (complete.size() < 1) {
-      tb.appendRepeated("", null, NUM_NULLABLE_COMPLETED_OVERVIEW_COLUMNS);
+      tb.appendRepeated("", NUM_NULLABLE_COMPLETED_OVERVIEW_COLUMNS);
       return;
     }
 
@@ -195,9 +208,11 @@ public class FragmentWrapper {
     tb.appendMillis(totalDuration / complete.size());
     tb.appendMillis(longRun.getEndTime() - longRun.getStartTime());
 
-    tb.appendPercent(totalProcessInMillis / (totalProcessInMillis + totalWaitInMillis), null,
-        //#8721 is the summation sign: sum(Busy): ## + sum(Wait): ##
+    Map<String, String> percBusyAttrMap = new HashMap<>();
+    //#8721 is the summation sign: sum(Busy): ## + sum(Wait): ##
+    percBusyAttrMap.put(HtmlAttribute.TITLE,
         String.format("&#8721;Busy: %,.2fs + &#8721;Wait: %,.2fs", totalProcessInMillis/1E3, totalWaitInMillis/1E3));
+    tb.appendPercent(totalProcessInMillis / (totalProcessInMillis + totalWaitInMillis), percBusyAttrMap);
 
     // TODO(DRILL-3494): Names (maxMem, getMaxMemoryUsed) are misleading; the value is peak memory allocated to fragment
     final MinorFragmentProfile maxMem = Collections.max(complete, Comparators.fragmentPeakMemory);
@@ -231,9 +246,9 @@ public class FragmentWrapper {
 
     Collections.sort(complete, Comparators.minorId);
 
-    Map<String, String> attributeMap = new HashMap<String, String>(); //Reusing for different fragments
+    Map<String, String> attributeMap = new HashMap<>(); //Reusing for different fragments
     for (final MinorFragmentProfile minor : complete) {
-      final ArrayList<OperatorProfile> ops = new ArrayList<>(minor.getOperatorProfileList());
+      final List<OperatorProfile> ops = new ArrayList<>(minor.getOperatorProfileList());
 
       long biggestIncomingRecords = 0;
       long biggestBatches = 0;
@@ -267,7 +282,7 @@ public class FragmentWrapper {
 
     for (final MinorFragmentProfile m : incomplete) {
       builder.appendCell(major.getMajorFragmentId() + "-" + m.getMinorFragmentId());
-      builder.appendRepeated(m.getState().toString(), null, NUM_NULLABLE_FRAGMENTS_COLUMNS);
+      builder.appendRepeated(m.getState().toString(), NUM_NULLABLE_FRAGMENTS_COLUMNS);
     }
     return builder.build();
   }
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/server/rest/profile/HtmlAttribute.java b/exec/java-exec/src/main/java/org/apache/drill/exec/server/rest/profile/HtmlAttribute.java
new file mode 100644
index 0000000..75db298
--- /dev/null
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/server/rest/profile/HtmlAttribute.java
@@ -0,0 +1,37 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.drill.exec.server.rest.profile;
+
+/**
+ * Define all attributes and values that can be injected by various Wrapper classes in org.apache.drill.exec.server.rest.*
+ */
+public class HtmlAttribute {
+  //Attributes
+  public static final String CLASS = "class";
+  public static final String DATA_ORDER = "data-order";
+  public static final String TITLE = "title";
+  public static final String SPILLS = "spills";
+  public static final String STYLE = "style";
+
+  //Values
+  public static final String CLASS_VALUE_SPILL_TAG = "spill-tag";
+  public static final String CLASS_VALUE_NO_PROGRESS_TAG = "no-progress-tag";
+  public static final String CLASS_VALUE_TIME_SKEW_TAG = "time-skew-tag";
+  public static final String CLASS_VALUE_SCAN_WAIT_TAG = "scan-wait-tag";
+  public static final String STYLE_VALUE_CURSOR_HELP = "cursor:help;";
+}
\ No newline at end of file
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/server/rest/profile/OperatorWrapper.java b/exec/java-exec/src/main/java/org/apache/drill/exec/server/rest/profile/OperatorWrapper.java
index f6803bf..0f61170 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/server/rest/profile/OperatorWrapper.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/server/rest/profile/OperatorWrapper.java
@@ -25,14 +25,16 @@ import java.util.List;
 import java.util.Map;
 import java.util.Set;
 import java.util.TreeSet;
+import java.util.concurrent.TimeUnit;
 
 import org.apache.commons.lang3.tuple.ImmutablePair;
+import org.apache.drill.common.config.DrillConfig;
+import org.apache.drill.exec.ExecConstants;
 import org.apache.drill.exec.ops.OperatorMetricRegistry;
 import org.apache.drill.exec.proto.UserBitShared.CoreOperatorType;
 import org.apache.drill.exec.proto.UserBitShared.MetricValue;
 import org.apache.drill.exec.proto.UserBitShared.OperatorProfile;
 import org.apache.drill.exec.proto.UserBitShared.StreamProfile;
-
 import org.apache.drill.shaded.guava.com.google.common.base.Preconditions;
 
 /**
@@ -42,10 +44,6 @@ public class OperatorWrapper {
   @SuppressWarnings("unused")
   private static final org.slf4j.Logger logger = org.slf4j.LoggerFactory.getLogger(OperatorWrapper.class);
 
-  private static final String HTML_ATTRIB_SPILLS = "spills";
-  private static final String HTML_ATTRIB_CLASS = "class";
-  private static final String HTML_ATTRIB_STYLE = "style";
-  private static final String HTML_ATTRIB_TITLE = "title";
   private static final DecimalFormat DECIMAL_FORMATTER = new DecimalFormat("#.##");
   private static final String UNKNOWN_OPERATOR = "UNKNOWN_OPERATOR";
   //Negative valued constant used for denoting invalid index to indicate absence of metric
@@ -56,8 +54,19 @@ public class OperatorWrapper {
   private final CoreOperatorType operatorType;
   private final String operatorName;
   private final int size;
+  private final int timeSkewMin;
+  private final double timeSkewRatio;
+  private final int scanWaitMin;
+  private final double waitSkewRatio;
+
+  public OperatorWrapper(int major, List<ImmutablePair<ImmutablePair<OperatorProfile, Integer>, String>> opsAndHostsList, Map<String, String> phyOperMap, DrillConfig config) {
+    //Threshold to track if the slowest operator ran relatively slow
+    timeSkewMin = config.getInt(ExecConstants.PROFILE_WARNING_TIME_SKEW_MIN);
+    timeSkewRatio = config.getDouble(ExecConstants.PROFILE_WARNING_TIME_SKEW_RATIO_PROCESS);
+    //Threshold to track if the slowest SCAN operator spent more time in wait than processing
+    scanWaitMin = config.getInt(ExecConstants.PROFILE_WARNING_SCAN_WAIT_MIN);
+    waitSkewRatio = config.getDouble(ExecConstants.PROFILE_WARNING_TIME_SKEW_RATIO_WAIT);
 
-  public OperatorWrapper(int major, List<ImmutablePair<ImmutablePair<OperatorProfile, Integer>, String>> opsAndHostsList, Map<String, String> phyOperMap) {
     Preconditions.checkArgument(opsAndHostsList.size() > 0);
     this.major = major;
     firstProfile = opsAndHostsList.get(0).getLeft().getLeft();
@@ -102,12 +111,12 @@ public class OperatorWrapper {
   public String getContent() {
     TableBuilder builder = new TableBuilder(OPERATOR_COLUMNS, OPERATOR_COLUMNS_TOOLTIP, true);
 
-    Map<String, String> attributeMap = new HashMap<String, String>(); //Reusing for different fragments
+    Map<String, String> attributeMap = new HashMap<>(); //Reusing for different fragments
     for (ImmutablePair<ImmutablePair<OperatorProfile, Integer>, String> ip : opsAndHosts) {
       int minor = ip.getLeft().getRight();
       OperatorProfile op = ip.getLeft().getLeft();
 
-      attributeMap.put("data-order", String.valueOf(minor)); //Overwrite values from previous fragments
+      attributeMap.put(HtmlAttribute.DATA_ORDER, String.valueOf(minor)); //Overwrite values from previous fragments
       String path = new OperatorPathBuilder().setMajor(major).setMinor(minor).setOperator(op).build();
       builder.appendCell(path, attributeMap);
       builder.appendCell(ip.getRight());
@@ -150,17 +159,18 @@ public class OperatorWrapper {
   //Palette to help shade operators sharing a common major fragment
   private static final String[] OPERATOR_OVERVIEW_BGCOLOR_PALETTE = {"#ffffff","#f2f2f2"};
 
-  public void addSummary(TableBuilder tb, HashMap<String, Long> majorFragmentBusyTally, long majorFragmentBusyTallyTotal) {
+  public void addSummary(TableBuilder tb, Map<String, Long> majorFragmentBusyTally, long majorFragmentBusyTallyTotal) {
     //Select background color from palette
     String opTblBgColor = OPERATOR_OVERVIEW_BGCOLOR_PALETTE[major%OPERATOR_OVERVIEW_BGCOLOR_PALETTE.length];
     String path = new OperatorPathBuilder().setMajor(major).setOperator(firstProfile).build();
-    tb.appendCell(path, null, null, opTblBgColor);
+    tb.appendCell(path, opTblBgColor, null);
     tb.appendCell(operatorName);
 
     //Check if spill information is available
     int spillCycleMetricIndex = getSpillCycleMetricIndex(operatorType);
     boolean isSpillableOp = (spillCycleMetricIndex != NO_SPILL_METRIC_INDEX);
     boolean hasSpilledToDisk = false;
+    boolean isScanOp = operatorName.endsWith("SCAN");
 
     //Get MajorFragment Busy+Wait Time Tally
     long majorBusyNanos = majorFragmentBusyTally.get(new OperatorPathBuilder().setMajor(major).build());
@@ -208,15 +218,53 @@ public class OperatorWrapper {
     tb.appendNanos(Math.round(setupSum / size));
     tb.appendNanos(longSetup.getLeft().getSetupNanos());
 
+    Map<String, String> timeSkewMap = null;
     final ImmutablePair<OperatorProfile, Integer> longProcess = Collections.max(opList, Comparators.processTime);
-    tb.appendNanos(Math.round(processSum / size));
-    tb.appendNanos(longProcess.getLeft().getProcessNanos());
+    //Calculating average processing time
+    long avgProcTime = Math.round(processSum / size);
+    tb.appendNanos(avgProcTime);
+    long maxProcTime = longProcess.getLeft().getProcessNanos();
+    //Calculating skew of longest processing fragment w.r.t. average
+    double maxSkew = (avgProcTime > 0) ? maxProcTime/Double.valueOf(avgProcTime) : 0.0d;
+    //Marking skew if both thresholds are crossed
+    if (avgProcTime > TimeUnit.SECONDS.toNanos(timeSkewMin) && maxSkew > timeSkewRatio ) {
+      timeSkewMap = new HashMap<>();
+      timeSkewMap.put(HtmlAttribute.CLASS, HtmlAttribute.CLASS_VALUE_TIME_SKEW_TAG);
+      timeSkewMap.put(HtmlAttribute.TITLE,  "One fragment took " + DECIMAL_FORMATTER.format(maxSkew) + " longer than average");
+      timeSkewMap.put(HtmlAttribute.STYLE, HtmlAttribute.STYLE_VALUE_CURSOR_HELP);
+    }
+    tb.appendNanos(maxProcTime, timeSkewMap);
 
     final ImmutablePair<OperatorProfile, Integer> shortWait = Collections.min(opList, Comparators.waitTime);
     final ImmutablePair<OperatorProfile, Integer> longWait = Collections.max(opList, Comparators.waitTime);
     tb.appendNanos(shortWait.getLeft().getWaitNanos());
-    tb.appendNanos(Math.round(waitSum / size));
-    tb.appendNanos(longWait.getLeft().getWaitNanos());
+    //Calculating average wait time for fragment
+    long avgWaitTime = Math.round(waitSum / size);
+
+    //Slow Scan Warning
+    Map<String, String> slowScanMap = null;
+    //Marking slow scan if threshold is crossed and wait was longer than processing
+    if (isScanOp && (avgWaitTime > TimeUnit.SECONDS.toNanos(scanWaitMin)) && (avgWaitTime > avgProcTime)) {
+      slowScanMap = new HashMap<>();
+      slowScanMap.put(HtmlAttribute.CLASS, HtmlAttribute.CLASS_VALUE_SCAN_WAIT_TAG);
+      slowScanMap.put(HtmlAttribute.TITLE, "Avg Wait Time &gt; Avg Processing Time");
+      slowScanMap.put(HtmlAttribute.STYLE, HtmlAttribute.STYLE_VALUE_CURSOR_HELP);
+    }
+    tb.appendNanos(avgWaitTime, slowScanMap);
+
+    long maxWaitTime = longWait.getLeft().getWaitNanos();
+    //Skewed Wait Warning
+    timeSkewMap = null; //Resetting
+    //Calculating skew of longest waiting fragment w.r.t. average
+    maxSkew = (avgWaitTime > 0) ? maxWaitTime/Double.valueOf(avgWaitTime) : 0.0d;
+    //Marking skew if both thresholds are crossed
+    if (avgWaitTime > TimeUnit.SECONDS.toNanos(timeSkewMin) && maxSkew > waitSkewRatio) {
+      timeSkewMap = new HashMap<>();
+      timeSkewMap.put(HtmlAttribute.CLASS, HtmlAttribute.CLASS_VALUE_TIME_SKEW_TAG);
+      timeSkewMap.put(HtmlAttribute.TITLE, "One fragment waited " + DECIMAL_FORMATTER.format(maxSkew) + " longer than average");
+      timeSkewMap.put(HtmlAttribute.STYLE, HtmlAttribute.STYLE_VALUE_CURSOR_HELP);
+    }
+    tb.appendNanos(maxWaitTime, timeSkewMap);
 
     tb.appendPercent(processSum / majorBusyNanos);
     tb.appendPercent(processSum / majorFragmentBusyTallyTotal);
@@ -232,15 +280,15 @@ public class OperatorWrapper {
       avgSpillMap = new HashMap<>();
       //Average SpillCycle
       double avgSpillCycle = spillCycleSum/size;
-      avgSpillMap.put(HTML_ATTRIB_TITLE, DECIMAL_FORMATTER.format(avgSpillCycle) + " spills on average");
-      avgSpillMap.put(HTML_ATTRIB_STYLE, "cursor:help;" + spillCycleMax);
-      avgSpillMap.put(HTML_ATTRIB_CLASS, "spill-tag"); //JScript will inject Icon
-      avgSpillMap.put(HTML_ATTRIB_SPILLS, DECIMAL_FORMATTER.format(avgSpillCycle)); //JScript will inject Count
+      avgSpillMap.put(HtmlAttribute.TITLE, DECIMAL_FORMATTER.format(avgSpillCycle) + " spills on average");
+      avgSpillMap.put(HtmlAttribute.STYLE, HtmlAttribute.STYLE_VALUE_CURSOR_HELP);
+      avgSpillMap.put(HtmlAttribute.CLASS, HtmlAttribute.CLASS_VALUE_SPILL_TAG); //JScript will inject Icon
+      avgSpillMap.put(HtmlAttribute.SPILLS, DECIMAL_FORMATTER.format(avgSpillCycle)); //JScript will inject Count
       maxSpillMap = new HashMap<>();
-      maxSpillMap.put(HTML_ATTRIB_TITLE, "Most # spills: " + spillCycleMax);
-      maxSpillMap.put(HTML_ATTRIB_STYLE, "cursor:help;" + spillCycleMax);
-      maxSpillMap.put(HTML_ATTRIB_CLASS, "spill-tag"); //JScript will inject Icon
-      maxSpillMap.put(HTML_ATTRIB_SPILLS, String.valueOf(spillCycleMax)); //JScript will inject Count
+      maxSpillMap.put(HtmlAttribute.TITLE, "Most # spills: " + spillCycleMax);
+      maxSpillMap.put(HtmlAttribute.STYLE, HtmlAttribute.STYLE_VALUE_CURSOR_HELP);
+      maxSpillMap.put(HtmlAttribute.CLASS, HtmlAttribute.CLASS_VALUE_SPILL_TAG); //JScript will inject Icon
+      maxSpillMap.put(HtmlAttribute.SPILLS, String.valueOf(spillCycleMax)); //JScript will inject Count
     }
 
     tb.appendBytes(Math.round(memSum / size), avgSpillMap);
@@ -312,7 +360,7 @@ public class OperatorWrapper {
 
       final Number[] values = new Number[metricNames.length];
       //Track new/Unknown Metrics
-      final Set<Integer> unknownMetrics = new TreeSet<Integer>();
+      final Set<Integer> unknownMetrics = new TreeSet<>();
       for (final MetricValue metric : op.getMetricList()) {
         if (metric.getMetricId() < metricNames.length) {
           if (metric.hasLongValue()) {
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/server/rest/profile/ProfileResources.java b/exec/java-exec/src/main/java/org/apache/drill/exec/server/rest/profile/ProfileResources.java
index 506900d..af2b790 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/server/rest/profile/ProfileResources.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/server/rest/profile/ProfileResources.java
@@ -201,10 +201,10 @@ public class ProfileResources {
     private List<ProfileInfo> finishedQueries;
     private List<String> errors;
 
-    public QProfiles(List<ProfileInfo> runningQueries, List<ProfileInfo> finishedQueries, List<String> erorrs) {
+    public QProfiles(List<ProfileInfo> runningQueries, List<ProfileInfo> finishedQueries, List<String> errors) {
       this.runningQueries = runningQueries;
       this.finishedQueries = finishedQueries;
-      this.errors = erorrs;
+      this.errors = errors;
     }
 
     public List<ProfileInfo> getRunningQueries() {
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/server/rest/profile/ProfileWrapper.java b/exec/java-exec/src/main/java/org/apache/drill/exec/server/rest/profile/ProfileWrapper.java
index 35a3706..7e72556 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/server/rest/profile/ProfileWrapper.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/server/rest/profile/ProfileWrapper.java
@@ -30,6 +30,7 @@ import java.util.stream.Collectors;
 
 import org.apache.commons.lang3.tuple.ImmutablePair;
 import org.apache.drill.common.config.DrillConfig;
+import org.apache.drill.exec.ExecConstants;
 import org.apache.drill.exec.proto.UserBitShared.CoreOperatorType;
 import org.apache.drill.exec.proto.UserBitShared.MajorFragmentProfile;
 import org.apache.drill.exec.proto.UserBitShared.MinorFragmentProfile;
@@ -40,9 +41,9 @@ import org.apache.drill.exec.proto.helper.QueryIdHelper;
 import org.apache.drill.exec.server.options.OptionList;
 import org.apache.drill.exec.server.options.OptionValue;
 import org.apache.drill.exec.server.rest.WebServer;
+import org.apache.drill.shaded.guava.com.google.common.base.CaseFormat;
 
 import com.fasterxml.jackson.databind.ObjectMapper;
-import org.apache.drill.shaded.guava.com.google.common.base.CaseFormat;
 
 /**
 * Wrapper class for a {@link #profile query profile}, so it can be presented through web UI.
@@ -57,11 +58,12 @@ public class ProfileWrapper {
   private final String id;
   private final List<FragmentWrapper> fragmentProfiles;
   private final List<OperatorWrapper> operatorProfiles;
-  private final HashMap<String, Long> majorFragmentTallyMap;
+  private final Map<String, Long> majorFragmentTallyMap;
   private final long majorFragmentTallyTotal;
   private final OptionList options;
   private final boolean onlyImpersonationEnabled;
   private Map<String, String> physicalOperatorMap;
+  private final String noProgressWarningThreshold;
 
   public ProfileWrapper(final QueryProfile profile, DrillConfig drillConfig) {
     this.profile = profile;
@@ -76,7 +78,7 @@ public class ProfileWrapper {
     Collections.sort(majors, Comparators.majorId);
 
     for (final MajorFragmentProfile major : majors) {
-      fragmentProfiles.add(new FragmentWrapper(major, profile.getStart()));
+      fragmentProfiles.add(new FragmentWrapper(major, profile.getStart(), drillConfig));
     }
     this.fragmentProfiles = fragmentProfiles;
     this.majorFragmentTallyMap = new HashMap<>(majors.size());
@@ -115,7 +117,7 @@ public class ProfileWrapper {
     Collections.sort(keys);
 
     for (final ImmutablePair<Integer, Integer> ip : keys) {
-      ows.add(new OperatorWrapper(ip.getLeft(), opmap.get(ip), physicalOperatorMap));
+      ows.add(new OperatorWrapper(ip.getLeft(), opmap.get(ip), physicalOperatorMap, drillConfig));
     }
     this.operatorProfiles = ows;
 
@@ -129,6 +131,7 @@ public class ProfileWrapper {
     this.options = options;
 
     this.onlyImpersonationEnabled = WebServer.isImpersonationOnlyEnabled(drillConfig);
+    this.noProgressWarningThreshold = String.valueOf(drillConfig.getInt(ExecConstants.PROFILE_WARNING_PROGRESS_THRESHOLD));
   }
 
   private long tallyMajorFragmentCost(List<MajorFragmentProfile> majorFragments) {
@@ -260,6 +263,11 @@ public class ProfileWrapper {
     return NOT_AVAILABLE_LABEL;
   }
 
+  //Threshold to be used by WebServer in issuing warning
+  public String getNoProgressWarningThreshold() {
+    return this.noProgressWarningThreshold;
+  }
+
   public List<FragmentWrapper> getFragmentProfiles() {
     return fragmentProfiles;
   }
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/server/rest/profile/TableBuilder.java b/exec/java-exec/src/main/java/org/apache/drill/exec/server/rest/profile/TableBuilder.java
index ad89b3f..615af0b 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/server/rest/profile/TableBuilder.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/server/rest/profile/TableBuilder.java
@@ -26,6 +26,7 @@ import java.util.Locale;
 import java.util.Map;
 
 public class TableBuilder {
+  private static final String NO_BGCOLOR = "";
   private final NumberFormat format = NumberFormat.getInstance(Locale.US);
   private final DateFormat dateFormat = new SimpleDateFormat("HH:mm:ss");
   private final DecimalFormat dec = new DecimalFormat("0.00");
@@ -45,10 +46,10 @@ public class TableBuilder {
 
     format.setMaximumFractionDigits(3);
 
-    sb.append("<table class=\"table table-bordered text-right"+(isSortable? " sortable" : "")+"\">\n<thead><tr>");
+    sb.append("<table class=\"table table-bordered text-right"+(isSortable? " sortable" : NO_BGCOLOR)+"\">\n<thead><tr>");
     for (int i = 0; i < columns.length; i++) {
       String cn = columns[i];
-      String ctt = "";
+      String ctt = NO_BGCOLOR;
       if (columnTooltip != null) {
         String tooltip = columnTooltip[i];
         if (tooltip != null) {
@@ -61,37 +62,26 @@ public class TableBuilder {
   }
 
   public void appendCell(final String s) {
-    appendCell(s, null, null, null);
+    appendCell(s, NO_BGCOLOR, null);
   }
 
-  public void appendCell(final String s, final String link) {
-    appendCell(s, link, null, null);
-  }
-
-  public void appendCell(final String s, final String link, final String titleText) {
-    appendCell(s, link, titleText, null);
-  }
-
-  public void appendCell(final String s, final String link, final String titleText, final String backgroundColor) {
-    appendCell(s, link, titleText, backgroundColor, null);
+  public void appendCell(final String s, final String backgroundColor) {
+    appendCell(s, backgroundColor, null);
   }
 
   public void appendCell(final String s, final Map<String, String> kvPairs) {
-    appendCell(s, null, null, null, kvPairs);
+    appendCell(s, NO_BGCOLOR, kvPairs);
   }
 
-  public void appendCell(final String s, final String link, final String titleText, final String backgroundColor,
-      final Map<String, String> kvPairs) {
+  //Inject value into a table cell. Start or end a row if required
+  public void appendCell(final String s, final String rowBackgroundColor, final Map<String, String> kvPairs) {
+    //Check if this is the first column of the row
     if (w == 0) {
       sb.append("<tr"
-          + (backgroundColor == null ? "" : " style=\"background-color:"+backgroundColor+"\"")
+          + (rowBackgroundColor == null || rowBackgroundColor == NO_BGCOLOR ? "" : " style=\"background-color:"+rowBackgroundColor+"\"")
           + ">");
     }
     StringBuilder tdElemSB = new StringBuilder("<td");
-    //Injecting title if specified (legacy impl)
-    if (titleText != null && titleText.length() > 0) {
-      tdElemSB.append(" title=\""+titleText+"\"");
-    }
     //Extract other attributes for injection into element
     if (kvPairs != null) {
       for (String attributeName : kvPairs.keySet()) {
@@ -99,8 +89,8 @@ public class TableBuilder {
         tdElemSB.append(attributeText);
       }
     }
-    //Closing <td>
-    tdElemSB.append(String.format(">%s%s</td>", s, link != null ? link : ""));
+    //Inserting inner text value and closing <td>
+    tdElemSB.append(">").append(s).append("</td>");
     sb.append(tdElemSB);
     if (++w >= width) {
       sb.append("</tr>\n");
@@ -108,13 +98,14 @@ public class TableBuilder {
     }
   }
 
-  public void appendRepeated(final String s, final String link, final int n) {
-    appendRepeated(s, link, n, null);
+  public void appendRepeated(final String s, final int n) {
+    appendRepeated(s, n, null);
   }
 
-  public void appendRepeated(final String s, final String link, final int n, final String tooltip) {
+  //Inject a value repeatedly into a table cell
+  public void appendRepeated(final String s, final int n, final Map<String, String> attributeMap) {
     for (int i = 0; i < n; i++) {
-      appendCell(s, link, tooltip);
+      appendCell(s, attributeMap);
     }
   }
 
@@ -122,109 +113,82 @@ public class TableBuilder {
     appendTime(d, null);
   }
 
-  public void appendTime(final long d, final String link) {
-    appendTime(d, link, null);
-  }
-
-  public void appendTime(final long d, final String link, final String tooltip) {
+  //Inject timestamp/date value with ordering into a table cell
+  public void appendTime(final long d, Map<String, String> attributeMap) {
     //Embedding dataTable's data-order attribute
-    Map<String, String> attributeMap = new HashMap<String, String>();
-    attributeMap.put("data-order", String.valueOf(d));
-    appendCell(dateFormat.format(d), link, tooltip, null, attributeMap);
+    if (attributeMap == null) {
+      attributeMap = new HashMap<>();
+    }
+    attributeMap.put(HtmlAttribute.DATA_ORDER, String.valueOf(d));
+    appendCell(dateFormat.format(d), null, attributeMap);
   }
 
   public void appendMillis(final long p) {
     appendMillis(p, null);
   }
 
-  public void appendMillis(final long p, final String link) {
-    appendMillis(p, link, null);
-  }
-
-  public void appendMillis(final long p, final String link, final String tooltip) {
+  //Inject millisecond based time value with ordering into a table cell
+  public void appendMillis(final long p, Map<String, String> attributeMap) {
     //Embedding dataTable's data-order attribute
-    Map<String, String> attributeMap = new HashMap<String, String>();
-    attributeMap.put("data-order", String.valueOf(p));
-    appendCell((new SimpleDurationFormat(0, p)).compact(), link, tooltip, null, attributeMap);
+    if (attributeMap == null) {
+      attributeMap = new HashMap<>();
+    }
+    attributeMap.put(HtmlAttribute.DATA_ORDER, String.valueOf(p));
+    appendCell((new SimpleDurationFormat(0, p)).compact(), NO_BGCOLOR, attributeMap);
   }
 
   public void appendNanos(final long p) {
-    appendNanos(p, null, null);
+    appendNanos(p, null);
   }
 
-  public void appendNanos(final long p, final String link) {
-    appendNanos(p, link, null);
-  }
-
-  public void appendNanos(final long p, final String link, final String tooltip) {
-    appendMillis(Math.round(p / 1000.0 / 1000.0), link, tooltip);
+  public void appendNanos(final long p, Map<String, String> attributeMap) {
+    appendMillis(Math.round(p / 1000.0 / 1000.0), attributeMap);
   }
 
   public void appendPercent(final double percentAsFraction) {
-    appendCell(dec.format(100*percentAsFraction).concat("%"), null, null);
-  }
-
-  public void appendPercent(final double percentAsFraction, final String link) {
-    appendCell(dec.format(100*percentAsFraction).concat("%"), link, null);
+    appendCell(dec.format(100*percentAsFraction).concat("%"), NO_BGCOLOR, null);
   }
 
-  public void appendPercent(final double percentAsFraction, final String link, final String tooltip) {
-    appendCell(dec.format(100*percentAsFraction).concat("%"), link, tooltip);
+  //Inject value as a percentage with value between 0 and 100 into a table cell
+  public void appendPercent(final double percentAsFraction, Map<String, String> attributeMap) {
+    appendCell(dec.format(100*percentAsFraction).concat("%"), NO_BGCOLOR, attributeMap);
   }
 
   public void appendFormattedNumber(final Number n) {
-    appendCell(format.format(n), null, null);
+    appendCell(format.format(n), NO_BGCOLOR, null);
   }
 
-  public void appendFormattedNumber(final Number n, final String link) {
-    appendCell(format.format(n), link, null);
-  }
-
-  public void appendFormattedNumber(final Number n, final String link, final String tooltip) {
-    appendCell(format.format(n), link, tooltip);
+  public void appendFormattedNumber(final Number n, Map<String, String> attributeMap) {
+    appendCell(format.format(n), NO_BGCOLOR, attributeMap);
   }
 
   public void appendFormattedInteger(final long n) {
-    appendCell(intformat.format(n), null, null);
-  }
-
-  public void appendFormattedInteger(final long n, final String link) {
-    appendCell(intformat.format(n), link, null);
+    appendCell(intformat.format(n), NO_BGCOLOR, null);
   }
 
-  public void appendFormattedInteger(final long n, final String link, final String tooltip) {
-    appendCell(intformat.format(n), link, tooltip);
+  public void appendFormattedInteger(final long n, Map<String, String> attributeMap) {
+    appendCell(intformat.format(n), NO_BGCOLOR, attributeMap);
   }
 
-  public void appendInteger(final long l, final String link, final String tooltip) {
-    appendCell(Long.toString(l), link, tooltip);
+  public void appendInteger(final long l, Map<String, String> attributeMap) {
+    appendCell(Long.toString(l), NO_BGCOLOR, attributeMap);
   }
 
   public void appendBytes(final long l) {
-    appendBytes(l, null, null, null);
-  }
-
-  public void appendBytes(final long l, final String link) {
-    appendBytes(l, link, null);
-  }
-
-  public void appendBytes(final long l, final String link, final String tooltip) {
-    appendBytes(l, link, tooltip, null);
+    appendBytes(l, null);
   }
 
+  //Inject print-friendly byte value with ordering into a table cell
   public void appendBytes(final long l, Map<String, String> attributeMap) {
-    appendBytes(l, null, null, attributeMap);
-  }
-
-  public void appendBytes(final long l, final String link, final String tooltip, Map<String, String> attributeMap) {
     //Embedding dataTable's data-order attribute
     if (attributeMap == null) {
       attributeMap = new HashMap<>();
     }
-    attributeMap.put("data-order", String.valueOf(l));
-    appendCell(bytePrint(l), link, tooltip, null, attributeMap);
+    attributeMap.put(HtmlAttribute.DATA_ORDER, String.valueOf(l));
+    appendCell(bytePrint(l), NO_BGCOLOR, attributeMap);
   }
 
+  //Generate a print-friendly representation of a byte count
   private String bytePrint(final long size) {
     final double t = size / Math.pow(1024, 4);
     if (t > 1) {
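
TableBuilder now routes every cell through a single appendCell(value, rowBackgroundColor, attributeMap) overload, carrying tooltips, CSS classes and sort keys in the attribute map instead of dedicated link/title parameters. The following self-contained sketch (not Drill code) shows how such a map renders into a <td> element:

    import java.util.LinkedHashMap;
    import java.util.Map;

    public class CellRenderSketch {
      // Minimal stand-in for TableBuilder.appendCell: renders one <td> with attributes
      static String renderCell(String value, Map<String, String> attributes) {
        StringBuilder td = new StringBuilder("<td");
        if (attributes != null) {
          for (Map.Entry<String, String> e : attributes.entrySet()) {
            td.append(String.format(" %s=\"%s\"", e.getKey(), e.getValue()));
          }
        }
        return td.append(">").append(value).append("</td>").toString();
      }

      public static void main(String[] args) {
        Map<String, String> attrs = new LinkedHashMap<>();
        attrs.put("class", "spill-tag");
        attrs.put("title", "2.00 spills on average");
        attrs.put("style", "cursor:help");
        // Prints: <td class="spill-tag" title="2.00 spills on average" style="cursor:help">1.5 MB</td>
        System.out.println(renderCell("1.5 MB", attrs));
      }
    }
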
diff --git a/exec/java-exec/src/main/resources/drill-module.conf b/exec/java-exec/src/main/resources/drill-module.conf
index e792b20..ac35cd9 100644
--- a/exec/java-exec/src/main/resources/drill-module.conf
+++ b/exec/java-exec/src/main/resources/drill-module.conf
@@ -142,6 +142,17 @@ drill.exec: {
       }
     }
     max_profiles: 100,
+    profile.warning: {
+      progress.threshold: 300,
+      time.skew: {
+        min: 2,
+        ratio: {
+          process: 2
+          wait: 2
+        }
+      },
+      scan.wait.min: 60
+    },
     session_max_idle_secs: 3600, # Default value 1hr
     cors: {
       enabled: false,
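
The new profile.warning block defines the thresholds the profile wrappers read above. A hedged sketch of reading them through DrillConfig follows; the full drill.exec.http.* key paths are inferred from the neighbouring keys in this hunk, and only the progress threshold is confirmed by this change set as an ExecConstants name.

    import org.apache.drill.common.config.DrillConfig;

    public class ProfileWarningSettingsSketch {
      public static void main(String[] args) {
        // Assumes a classpath that contains the drill-module.conf shown above
        DrillConfig config = DrillConfig.create();
        // Key paths below are inferred; only the first one is referenced in this change
        // via ExecConstants.PROFILE_WARNING_PROGRESS_THRESHOLD.
        int progressSecs = config.getInt("drill.exec.http.profile.warning.progress.threshold");
        long timeSkewMin = config.getLong("drill.exec.http.profile.warning.time.skew.min");
        double waitSkewRatio = config.getDouble("drill.exec.http.profile.warning.time.skew.ratio.wait");
        long scanWaitMin = config.getLong("drill.exec.http.profile.warning.scan.wait.min");
        System.out.printf("progress=%ds, skewMin=%ds, waitRatio=%.1f, scanWaitMin=%ds%n",
            progressSecs, timeSkewMin, waitSkewRatio, scanWaitMin);
      }
    }
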
diff --git a/exec/java-exec/src/main/resources/rest/profile/profile.ftl b/exec/java-exec/src/main/resources/rest/profile/profile.ftl
index ee126ab..6b4e732 100644
--- a/exec/java-exec/src/main/resources/rest/profile/profile.ftl
+++ b/exec/java-exec/src/main/resources/rest/profile/profile.ftl
@@ -48,8 +48,40 @@
         "lengthChange": false,
         "paging": false,
         "info": false
-      }
-    );} );
+      });
+      //Enable Warnings by making it visible
+      checkForWarnings();
+    });
+
+    //Check for Warnings
+    function checkForWarnings() {
+      //No Progress Warning
+      let noProgressFragmentCount = document.querySelectorAll('td[class=no-progress-tag]').length;
+      let majorFragmentCount = document.querySelectorAll('#fragment-overview table tbody tr').length;
+      toggleWarning("noProgressWarning", majorFragmentCount, noProgressFragmentCount);
+
+      //Spill To Disk Warnings
+      let spillCount = document.querySelectorAll('td[class=spill-tag]').length;
+      toggleWarning("spillToDiskWarning", true, (spillCount > 0));
+
+      //Slow Scan Warnings
+      let longScanWaitCount = document.querySelectorAll('td[class=scan-wait-tag]').length;
+      toggleWarning("longScanWaitWarning", true, (longScanWaitCount > 0));
+    }
+
+    //Show Warnings
+    function toggleWarning(warningElemId, expectedVal, actualVal) {
+        if (expectedVal == actualVal) {
+            document.getElementById(warningElemId).style.display="block";
+        } else {
+            closeWarning(warningElemId);
+        }
+    }
+
+    //Close Warning
+    function closeWarning(warningElemId) {
+        document.getElementById(warningElemId).style.display="none";
+    }
 
     //Close the cancellation status popup
     function refreshStatus() {
@@ -211,7 +243,7 @@ table.sortable thead .sorting_desc { background-image: url("/static/img/black-de
   </div>
   </#if>
   </h3>
-  
+
   <div class="panel-group" id="query-profile-accordion">
     <div class="panel panel-default">
       <div class="panel-heading">
@@ -325,26 +357,31 @@ table.sortable thead .sorting_desc { background-image: url("/static/img/black-de
       <div id="fragment-overview" class="panel-collapse collapse">
         <div class="panel-body">
           <svg id="fragment-overview-canvas" class="center-block"></svg>
+          <div id="noProgressWarning" style="display:none;cursor:help" class="panel panel-warning">
+            <div class="panel-heading" title="Check if any of the Drillbits are waiting for data from a SCAN operator, or might actually be hung with its VM thread being busy." style="cursor:pointer">
+            <span class="glyphicon glyphicon-alert" style="font-size:125%">&#xe209;</span> <b>WARNING:</b> No fragments have made any progress in the last <b>${model.getNoProgressWarningThreshold()}</b> seconds. (See <span style="font-style:italic;font-weight:bold">Last Progress</span> below)
+            </div>
+          </div>
           ${model.getFragmentsOverview()?no_esc}
         </div>
       </div>
-    </div>
-    <#list model.getFragmentProfiles() as frag>
-    <div class="panel panel-default">
-      <div class="panel-heading">
-        <h4 class="panel-title">
-          <a data-toggle="collapse" href="#${frag.getId()}">
-            ${frag.getDisplayName()}
-          </a>
-        </h4>
-      </div>
-      <div id="${frag.getId()}" class="panel-collapse collapse">
-        <div class="panel-body">
-          ${frag.getContent()?no_esc}
+      <#list model.getFragmentProfiles() as frag>
+      <div class="panel panel-default">
+        <div class="panel-heading">
+          <h4 class="panel-title">
+            <a data-toggle="collapse" href="#${frag.getId()}">
+              ${frag.getDisplayName()}
+            </a>
+          </h4>
+        </div>
+        <div id="${frag.getId()}" class="panel-collapse collapse">
+          <div class="panel-body">
+            ${frag.getContent()?no_esc}
+          </div>
         </div>
       </div>
+      </#list>
     </div>
-    </#list>
   </div>
 
   <div class="page-header"></div>
@@ -361,6 +398,17 @@ table.sortable thead .sorting_desc { background-image: url("/static/img/black-de
       </div>
       <div id="operator-overview" class="panel-collapse collapse">
         <div class="panel-body">
+          <div id="spillToDiskWarning" style="display:none;cursor:help" class="panel panel-warning" title="Spills occur because a buffered operator didn't get enough memory to hold data in memory. Increase the memory or ensure that number of spills &lt; 2">
+            <div class="panel-heading"><span class="glyphicon glyphicon-alert" style="font-size:125%">&#xe209;</span> <b>WARNING:</b> Some operators have data spilled to disk. This will result in performance loss. (See <span style="font-style:italic;font-weight:bold">Avg Peak Memory</span> and <span style="font-style:italic;font-weight:bold">Max Peak Memory</span> below)
+            <button type="button" class="close" onclick="closeWarning('spillToDiskWarning')" style="font-size:180%">&times;</button>
+            </div>
+          </div>
+          <div id="longScanWaitWarning" style="display:none;cursor:help" class="panel panel-warning">
+            <div class="panel-heading" title="Check if any of the Drillbits are waiting for data from a SCAN operator, or might actually be hung with its VM thread being busy." style="cursor:pointer">
+            <span class="glyphicon glyphicon-alert" style="font-size:125%">&#xe209;</span> <b>WARNING:</b> Some of the SCAN operators spent more time waiting for the data than processing it. (See <span style="font-style:italic;font-weight:bold">Avg Wait Time</span> as compared to <span style="font-style:italic;font-weight:bold">Average Process Time</span> for the <b>SCAN</b> operators below)
+            <button type="button" class="close" onclick="closeWarning('longScanWaitWarning')" style="font-size:180%">&times;</button>
+            </div>
+          </div>
           ${model.getOperatorsOverview()?no_esc}
         </div>
       </div>
@@ -413,14 +461,33 @@ table.sortable thead .sorting_desc { background-image: url("/static/img/black-de
     <script>
     //Inject Spilled Tags
     $(window).on('load', function () {
-      var spillLabel = document.getElementsByClassName("spill-tag");
-      var i;
-      for (i = 0; i < spillLabel.length; i++) {
-        var content = spillLabel[i].innerHTML;
-        spillLabel[i].innerHTML = "<span class=\"glyphicon glyphicon-download-alt\">&nbsp;</span>"+content;
-      }
+      injectIconByClass("spill-tag","glyphicon-download-alt");
+      injectIconByClass("time-skew-tag","glyphicon-time");
+      injectSlowScanIcon();
     });
 
+    //Inject Glyphicon by Class tag
+    function injectIconByClass(tagLabel, tagIcon) {
+        //Inject the given glyphicon for every element tagged with the given class
+        var tagElemList = document.getElementsByClassName(tagLabel);
+        var i;
+        for (i = 0; i < tagElemList.length; i++) {
+            var content = tagElemList[i].innerHTML;
+            tagElemList[i].innerHTML = "<span class=\"glyphicon "+tagIcon+"\">&nbsp;</span>"+content;
+        }
+    }
+
+    //Inject PNG icon for slow
+    function injectSlowScanIcon() {
+        //Inject slow scan (turtle) icons
+        var tagElemList = document.getElementsByClassName("scan-wait-tag");
+        var i;
+        for (i = 0; i < tagElemList.length; i++) {
+            var content = tagElemList[i].innerHTML;
+            tagElemList[i].innerHTML = "<img src='/static/img/turtle.png' alt='slow'> "+content;
+        }
+    }
+
     //Configuration for Query Viewer in Profile
     ace.require("ace/ext/language_tools");
     var viewer = ace.edit("query-text");
diff --git a/exec/java-exec/src/main/resources/rest/static/img/turtle.png b/exec/java-exec/src/main/resources/rest/static/img/turtle.png
new file mode 100644
index 0000000..ec394a4
Binary files /dev/null and b/exec/java-exec/src/main/resources/rest/static/img/turtle.png differ


[drill] 06/10: DRILL-6907: Fix hive-exec-shaded classes recognition in IntelliJ IDEA closes #1575

Posted by gp...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

gparai pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/drill.git

commit f687da853d8a35c3b34d9efd05f85bc37d69d14d
Author: Volodymyr Vysotskyi <vv...@gmail.com>
AuthorDate: Sun Dec 16 01:17:16 2018 +0200

    DRILL-6907: Fix hive-exec-shaded classes recognition in IntelliJ IDEA
    closes #1575
---
 contrib/storage-hive/hive-exec-shade/pom.xml | 35 ++++++++++++++++++++++++++++
 1 file changed, 35 insertions(+)

diff --git a/contrib/storage-hive/hive-exec-shade/pom.xml b/contrib/storage-hive/hive-exec-shade/pom.xml
index 1b412e6..824f9a4 100644
--- a/contrib/storage-hive/hive-exec-shade/pom.xml
+++ b/contrib/storage-hive/hive-exec-shade/pom.xml
@@ -101,6 +101,14 @@
           </artifactSet>
           <createDependencyReducedPom>false</createDependencyReducedPom>
           <promoteTransitiveDependencies>true</promoteTransitiveDependencies>
+          <createSourcesJar>true</createSourcesJar>
+          <shadeSourcesContent>true</shadeSourcesContent>
+          <transformers>
+            <transformer implementation="org.apache.maven.plugins.shade.resource.ApacheLicenseResourceTransformer"/>
+            <transformer implementation="org.apache.maven.plugins.shade.resource.ApacheNoticeResourceTransformer">
+              <addHeader>false</addHeader>
+            </transformer>
+          </transformers>
           <relocations>
             <relocation>
               <pattern>com.google.</pattern>
@@ -176,6 +184,33 @@
       </plugin>
       <plugin>
         <groupId>org.codehaus.mojo</groupId>
+        <artifactId>build-helper-maven-plugin</artifactId>
+        <executions>
+          <execution>
+            <id>attach-hive-shaded</id>
+            <phase>package</phase>
+            <goals>
+              <goal>attach-artifact</goal>
+            </goals>
+            <configuration>
+              <artifacts>
+                <artifact>
+                  <file>${project.build.directory}/${project.artifactId}-${project.version}.jar</file>
+                  <type>jar</type>
+                  <classifier>jar</classifier>
+                </artifact>
+                <artifact>
+                  <file>${project.build.directory}/${project.artifactId}-${project.version}-sources.jar</file>
+                  <type>jar</type>
+                  <classifier>sources</classifier>
+                </artifact>
+              </artifacts>
+            </configuration>
+          </execution>
+        </executions>
+      </plugin>
+      <plugin>
+        <groupId>org.codehaus.mojo</groupId>
         <artifactId>findbugs-maven-plugin</artifactId>
         <configuration>
           <skip>true</skip>


[drill] 03/10: DRILL-6931: File listing: fix issue for S3 directory objects and improve performance for recursive listing closes #1590

Posted by gp...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

gparai pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/drill.git

commit 7108f162cd4f18121aa9a8ace76326bd5fbf8264
Author: Arina Ielchiieva <ar...@gmail.com>
AuthorDate: Fri Dec 28 19:45:38 2018 +0200

    DRILL-6931: File listing: fix issue for S3 directory objects and improve performance for recursive listing
    closes #1590
---
 .../planner/sql/handlers/ShowFilesHandler.java     |   8 +-
 .../store/ischema/InfoSchemaRecordGenerator.java   |   3 +-
 .../org/apache/drill/exec/util/FileSystemUtil.java | 230 ++++++++++++---------
 3 files changed, 144 insertions(+), 97 deletions(-)

diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/handlers/ShowFilesHandler.java b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/handlers/ShowFilesHandler.java
index 9782bbf..3398340 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/handlers/ShowFilesHandler.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/handlers/ShowFilesHandler.java
@@ -50,7 +50,7 @@ public class ShowFilesHandler extends DefaultSqlHandler {
     SchemaPlus drillSchema = defaultSchema;
     SqlShowFiles showFiles = unwrap(sqlNode, SqlShowFiles.class);
     SqlIdentifier from = showFiles.getDb();
-    String fromDir = "./";
+    String fromDir = null;
 
     // Show files can be used without from clause, in which case we display the files in the default schema
     if (from != null) {
@@ -61,7 +61,7 @@ public class ShowFilesHandler extends DefaultSqlHandler {
         // Entire from clause is not a schema, try to obtain the schema without the last part of the specified clause.
         drillSchema = SchemaUtilites.findSchema(defaultSchema, from.names.subList(0, from.names.size() - 1));
         // Listing for specific directory: show files in dfs.tmp.specific_directory
-        fromDir = fromDir + from.names.get((from.names.size() - 1));
+        fromDir = from.names.get((from.names.size() - 1));
       }
 
       if (drillSchema == null) {
@@ -81,7 +81,9 @@ public class ShowFilesHandler extends DefaultSqlHandler {
           .build(logger);
     }
 
-    Path path = new Path(wsSchema.getDefaultLocation(), fromDir);
+    Path endPath = fromDir == null ? new Path(wsSchema.getDefaultLocation()) : new Path(wsSchema.getDefaultLocation(), fromDir);
+    // add URI to the path to ensure that directory objects are skipped (see S3AFileSystem.listStatus method)
+    Path path = new Path(wsSchema.getFS().getUri().toString(), endPath);
     List<ShowFilesCommandResult> records = FileSystemUtil.listAllSafe(wsSchema.getFS(), path, false).stream()
         // use ShowFilesCommandResult for backward compatibility
         .map(fileStatus -> new ShowFilesCommandResult(new Records.File(wsSchema.getFullSchemaName(), wsSchema, fileStatus)))
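
Qualifying the listing path with the file system URI lets S3AFileSystem.listStatus recognize and skip zero-byte directory marker objects. A minimal sketch of the path construction follows, using placeholder workspace values in place of Drill's schema objects:

    import org.apache.hadoop.fs.Path;

    public class QualifiedListingPathSketch {
      public static void main(String[] args) {
        // Hypothetical workspace values; in Drill these come from the dfs workspace config
        String fsUri = "s3a://my-bucket";        // wsSchema.getFS().getUri().toString()
        String defaultLocation = "/tmp";         // wsSchema.getDefaultLocation()
        String fromDir = "specific_directory";   // last part of the FROM clause, may be null

        Path endPath = fromDir == null ? new Path(defaultLocation) : new Path(defaultLocation, fromDir);
        // Prefixing the scheme and authority yields s3a://my-bucket/tmp/specific_directory,
        // which lets the S3A listing filter out the bucket's directory marker objects
        Path path = new Path(fsUri, endPath);
        System.out.println(path);
      }
    }
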
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/store/ischema/InfoSchemaRecordGenerator.java b/exec/java-exec/src/main/java/org/apache/drill/exec/store/ischema/InfoSchemaRecordGenerator.java
index 1e72840..bb49e17 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/store/ischema/InfoSchemaRecordGenerator.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/store/ischema/InfoSchemaRecordGenerator.java
@@ -435,7 +435,8 @@ public abstract class InfoSchemaRecordGenerator<S> {
           String defaultLocation = wsSchema.getDefaultLocation();
           FileSystem fs = wsSchema.getFS();
           boolean recursive = optionManager.getBoolean(ExecConstants.LIST_FILES_RECURSIVELY);
-          FileSystemUtil.listAllSafe(fs, new Path(defaultLocation), recursive).forEach(
+          // add URI to the path to ensure that directory objects are skipped (see S3AFileSystem.listStatus method)
+          FileSystemUtil.listAllSafe(fs, new Path(fs.getUri().toString(), defaultLocation), recursive).forEach(
               fileStatus -> records.add(new Records.File(schemaName, wsSchema, fileStatus))
           );
         }
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/util/FileSystemUtil.java b/exec/java-exec/src/main/java/org/apache/drill/exec/util/FileSystemUtil.java
index 47ac44c..82500da 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/util/FileSystemUtil.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/util/FileSystemUtil.java
@@ -17,6 +17,7 @@
  */
 package org.apache.drill.exec.util;
 
+import org.apache.drill.common.exceptions.ErrorHelper;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -25,7 +26,12 @@ import org.apache.hadoop.fs.PathFilter;
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Arrays;
+import java.util.Collections;
 import java.util.List;
+import java.util.concurrent.ForkJoinPool;
+import java.util.concurrent.ForkJoinTask;
+import java.util.concurrent.RecursiveTask;
+import java.util.stream.Collectors;
 import java.util.stream.Stream;
 
 /**
@@ -42,6 +48,15 @@ public class FileSystemUtil {
   public static final PathFilter DUMMY_FILTER = path -> true;
 
   /**
+   * Indicates which file system objects should be returned during listing.
+   */
+  private enum Scope {
+    DIRECTORIES,
+    FILES,
+    ALL
+  }
+
+  /**
    * Returns statuses of all directories present in given path applying custom filters if present.
    * Will also include nested directories if recursive flag is set to true.
    *
@@ -51,10 +66,8 @@ public class FileSystemUtil {
    * @param filters list of custom filters (optional)
    * @return list of matching directory statuses
    */
-  public static List<FileStatus> listDirectories(final FileSystem fs, Path path, boolean recursive, PathFilter... filters) throws IOException {
-    List<FileStatus> statuses = new ArrayList<>();
-    listDirectories(fs, path, recursive, false, statuses, mergeFilters(filters));
-    return statuses;
+  public static List<FileStatus> listDirectories(FileSystem fs, Path path, boolean recursive, PathFilter... filters) throws IOException {
+    return list(fs, path, Scope.DIRECTORIES, recursive, false, filters);
   }
 
   /**
@@ -68,14 +81,13 @@ public class FileSystemUtil {
    * @param filters list of custom filters (optional)
    * @return list of matching directory statuses
    */
-  public static List<FileStatus> listDirectoriesSafe(final FileSystem fs, Path path, boolean recursive, PathFilter... filters) {
-    List<FileStatus> statuses = new ArrayList<>();
+  public static List<FileStatus> listDirectoriesSafe(FileSystem fs, Path path, boolean recursive, PathFilter... filters) {
     try {
-      listDirectories(fs, path, recursive, true, statuses, mergeFilters(filters));
+      return list(fs, path, Scope.DIRECTORIES, recursive, true, filters);
     } catch (Exception e) {
       // all exceptions are ignored
+      return Collections.emptyList();
     }
-    return statuses;
   }
 
   /**
@@ -89,9 +101,7 @@ public class FileSystemUtil {
    * @return list of matching file statuses
    */
   public static List<FileStatus> listFiles(FileSystem fs, Path path, boolean recursive, PathFilter... filters) throws IOException {
-    List<FileStatus> statuses = new ArrayList<>();
-    listFiles(fs, path, recursive, false, statuses, mergeFilters(filters));
-    return statuses;
+    return list(fs, path, Scope.FILES, recursive, false, filters);
   }
 
   /**
@@ -105,13 +115,12 @@ public class FileSystemUtil {
    * @return list of matching file statuses
    */
   public static List<FileStatus> listFilesSafe(FileSystem fs, Path path, boolean recursive, PathFilter... filters) {
-    List<FileStatus> statuses = new ArrayList<>();
     try {
-      listFiles(fs, path, recursive, true, statuses, mergeFilters(filters));
+      return list(fs, path, Scope.FILES, recursive, true, filters);
     } catch (Exception e) {
       // all exceptions are ignored
+      return Collections.emptyList();
     }
-    return statuses;
   }
 
   /**
@@ -125,9 +134,7 @@ public class FileSystemUtil {
    * @return list of matching directory and file statuses
    */
   public static List<FileStatus> listAll(FileSystem fs, Path path, boolean recursive, PathFilter... filters) throws IOException {
-    List<FileStatus> statuses = new ArrayList<>();
-    listAll(fs, path, recursive, false, statuses, mergeFilters(filters));
-    return statuses;
+    return list(fs, path, Scope.ALL, recursive, false, filters);
   }
 
   /**
@@ -142,13 +149,12 @@ public class FileSystemUtil {
    * @return list of matching directory and file statuses
    */
   public static List<FileStatus> listAllSafe(FileSystem fs, Path path, boolean recursive, PathFilter... filters) {
-    List<FileStatus> statuses = new ArrayList<>();
     try {
-      listAll(fs, path, recursive, true, statuses, mergeFilters(filters));
+      return list(fs, path, Scope.ALL, recursive, true, filters);
     } catch (Exception e) {
       // all exceptions are ignored
+      return Collections.emptyList();
     }
-    return statuses;
   }
 
   /**
@@ -177,7 +183,7 @@ public class FileSystemUtil {
    * @param filters array of filters
    * @return one filter that combines all given filters
    */
-  public static PathFilter mergeFilters(final PathFilter... filters) {
+  public static PathFilter mergeFilters(PathFilter... filters) {
     if (filters.length == 0) {
       return DUMMY_FILTER;
     }
@@ -186,103 +192,141 @@ public class FileSystemUtil {
   }
 
   /**
-   * Helper method that will store in given holder statuses of all directories present in given path applying custom filter.
-   * If recursive flag is set to true, will call itself recursively to add statuses of nested directories.
-   * If suppress exceptions flag is set to true, will ignore all exceptions during listing.
+   * Helper method that merges given filters into one and
+   * determines which listing method should be called based on recursive flag value.
    *
-   * @param fs current file system
-   * @param path path to directory
-   * @param recursive true if nested directories should be included
-   * @param suppressExceptions indicates if exceptions should be ignored during listing
-   * @param statuses holder for directory statuses
-   * @param filter custom filter
-   * @return holder with all matching directory statuses
+   * @param fs file system
+   * @param path path to file or directory
+   * @param scope file system objects scope
+   * @param recursive indicates if listing should be done recursively
+   * @param suppressExceptions indicates if exceptions should be ignored
+   * @param filters filters to be applied
+   * @return list of file statuses
    */
-  private static List<FileStatus> listDirectories(FileSystem fs, Path path, boolean recursive, boolean suppressExceptions,
-                                                  List<FileStatus> statuses, PathFilter filter) throws IOException {
-    try {
-      for (FileStatus status : fs.listStatus(path, filter)) {
-        if (status.isDirectory()) {
-          statuses.add(status);
-          if (recursive) {
-            listDirectories(fs, status.getPath(), true, suppressExceptions, statuses, filter);
-          }
-        }
-      }
-    } catch (Exception e) {
-      if (suppressExceptions) {
-        logger.debug("Exception during listing file statuses", e);
-      } else {
-        throw e;
-      }
-    }
-    return statuses;
+  private static List<FileStatus> list(FileSystem fs, Path path, Scope scope, boolean recursive, boolean suppressExceptions, PathFilter... filters) throws IOException {
+    PathFilter filter = mergeFilters(filters);
+    return recursive ? listRecursive(fs, path, scope, suppressExceptions, filter)
+      : listNonRecursive(fs, path, scope, suppressExceptions, filter);
   }
 
   /**
-   * Helper method that will store in given holder statuses of all files present in given path applying custom filter.
-   * If recursive flag is set to true, will call itself recursively to add file statuses from nested directories.
-   * If suppress exceptions flag is set to true, will ignore all exceptions during listing.
+   * Lists file statuses non-recursively based on given file system objects {@link Scope}.
    *
-   * @param fs current file system
+   * @param fs file system
    * @param path path to file or directory
-   * @param recursive true if files in nested directories should be included
-   * @param suppressExceptions indicates if exceptions should be ignored during listing
-   * @param statuses holder for file statuses
-   * @param filter custom filter
-   * @return holder with all matching file statuses
+   * @param scope file system objects scope
+   * @param suppressExceptions indicates if exceptions should be ignored
+   * @param filter filter to be applied
+   * @return list of file statuses
    */
-  private static List<FileStatus> listFiles(FileSystem fs, Path path, boolean recursive, boolean suppressExceptions,
-                                            List<FileStatus> statuses, PathFilter filter) throws IOException {
+  private static List<FileStatus> listNonRecursive(FileSystem fs, Path path, Scope scope, boolean suppressExceptions, PathFilter filter) throws IOException {
     try {
-      for (FileStatus status : fs.listStatus(path, filter)) {
-        if (status.isDirectory()) {
-          if (recursive) {
-            listFiles(fs, status.getPath(), true, suppressExceptions, statuses, filter);
-          }
-        } else {
-          statuses.add(status);
-        }
-      }
+      return Stream.of(fs.listStatus(path, filter))
+        .filter(status -> isStatusApplicable(status, scope))
+        .collect(Collectors.toList());
     } catch (Exception e) {
       if (suppressExceptions) {
         logger.debug("Exception during listing file statuses", e);
+        return Collections.emptyList();
       } else {
         throw e;
       }
     }
-    return statuses;
   }
 
   /**
-   * Helper method that will store in given holder statuses of all directories and files present in given path applying custom filter.
-   * If recursive flag is set to true, will call itself recursively to add nested directories and their file statuses.
-   * If suppress exceptions flag is set to true, will ignore all exceptions during listing.
+   * Lists file statuses recursively based on given file system objects {@link Scope}.
+   * Uses {@link ForkJoinPool} executor service and {@link RecursiveListing} task
+   * to parallel and speed up listing.
    *
-   * @param fs current file system
+   * @param fs file system
    * @param path path to file or directory
-   * @param recursive true if nested directories and their files should be included
-   * @param suppressExceptions indicates if exceptions should be ignored during listing
-   * @param statuses holder for directory and file statuses
-   * @param filter custom filter
-   * @return holder with all matching directory and file statuses
+   * @param scope file system objects scope
+   * @param suppressExceptions indicates if exceptions should be ignored
+   * @param filter filter to be applied
+   * @return list of file statuses
    */
-  private static List<FileStatus> listAll(FileSystem fs, Path path, boolean recursive, boolean suppressExceptions,
-                                          List<FileStatus> statuses, PathFilter filter) throws IOException {
+  private static List<FileStatus> listRecursive(FileSystem fs, Path path, Scope scope, boolean suppressExceptions, PathFilter filter) {
+    ForkJoinPool pool = new ForkJoinPool();
     try {
-      for (FileStatus status : fs.listStatus(path, filter)) {
-        statuses.add(status);
-        if (status.isDirectory() && recursive) {
-          listAll(fs, status.getPath(), true, suppressExceptions, statuses, filter);
+      RecursiveListing task = new RecursiveListing(fs, path, scope, suppressExceptions, filter);
+      return pool.invoke(task);
+    } finally {
+      pool.shutdown();
+    }
+  }
+
+  /**
+   * Checks if file status is applicable based on file system object {@link Scope}.
+   *
+   * @param status file status
+   * @param scope file system objects scope
+   * @return true if status is applicable, false otherwise
+   */
+  private static boolean isStatusApplicable(FileStatus status, Scope scope) {
+    switch (scope) {
+      case DIRECTORIES:
+        return status.isDirectory();
+      case FILES:
+        return status.isFile();
+      case ALL:
+        return true;
+      default:
+        return false;
+    }
+  }
+
+  /**
+   * Task that parallels file status listing for each nested directory,
+   * gathers and returns common list of file statuses.
+   */
+  private static class RecursiveListing extends RecursiveTask<List<FileStatus>> {
+
+    private final FileSystem fs;
+    private final Path path;
+    private final Scope scope;
+    private final boolean suppressExceptions;
+    private final PathFilter filter;
+
+    RecursiveListing(FileSystem fs, Path path, Scope scope, boolean suppressExceptions, PathFilter filter) {
+      this.fs = fs;
+      this.path = path;
+      this.scope = scope;
+      this.suppressExceptions = suppressExceptions;
+      this.filter = filter;
+    }
+
+    @Override
+    protected List<FileStatus> compute() {
+      List<FileStatus> statuses = new ArrayList<>();
+      List<RecursiveListing> tasks = new ArrayList<>();
+
+      try {
+        for (FileStatus status : fs.listStatus(path, filter)) {
+          if (isStatusApplicable(status, scope)) {
+            statuses.add(status);
+          }
+          if (status.isDirectory()) {
+            RecursiveListing task = new RecursiveListing(fs, status.getPath(), scope, suppressExceptions, filter);
+            task.fork();
+            tasks.add(task);
+          }
+        }
+      } catch (Exception e) {
+        if (suppressExceptions) {
+          logger.debug("Exception during listing file statuses", e);
+        } else {
+          // is used to re-throw checked exception
+          ErrorHelper.sneakyThrow(e);
         }
       }
-    } catch (Exception e) {
-      if (suppressExceptions) {
-        logger.debug("Exception during listing file statuses", e);
-      } else {
-        throw e;
-      }
+
+      tasks.stream()
+        .map(ForkJoinTask::join)
+        .forEach(statuses::addAll);
+
+      return statuses;
     }
-    return statuses;
   }
+
 }
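
Recursive listing is now parallelized: each directory forks a RecursiveListing subtask and the parent joins the results, so deep directory trees (and slow S3 prefixes) are traversed concurrently instead of sequentially. The condensed, self-contained sketch below shows the same fork/join pattern over java.io.File rather than Drill's Hadoop FileSystem wrapper:

    import java.io.File;
    import java.util.ArrayList;
    import java.util.List;
    import java.util.concurrent.ForkJoinPool;
    import java.util.concurrent.ForkJoinTask;
    import java.util.concurrent.RecursiveTask;

    public class ParallelListingSketch {
      static class ListTask extends RecursiveTask<List<File>> {
        private final File dir;
        ListTask(File dir) { this.dir = dir; }

        @Override
        protected List<File> compute() {
          List<File> results = new ArrayList<>();
          List<ListTask> subTasks = new ArrayList<>();
          File[] children = dir.listFiles();
          if (children == null) {
            return results;                // unreadable directory: nothing to add
          }
          for (File child : children) {
            results.add(child);            // collect the entry itself
            if (child.isDirectory()) {
              ListTask task = new ListTask(child);
              task.fork();                 // list nested directories in parallel
              subTasks.add(task);
            }
          }
          subTasks.stream().map(ForkJoinTask::join).forEach(results::addAll);
          return results;
        }
      }

      public static void main(String[] args) {
        ForkJoinPool pool = new ForkJoinPool();
        try {
          List<File> all = pool.invoke(new ListTask(new File(".")));
          System.out.println("Entries found: " + all.size());
        } finally {
          pool.shutdown();
        }
      }
    }
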


[drill] 07/10: DRILL-6894: CTAS and CTTAS are not working on S3 storage when cache is disabled

Posted by gp...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

gparai pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/drill.git

commit a9331361c72d47c98ae16087e865bdf61eb01d96
Author: Bohdan Kazydub <bo...@gmail.com>
AuthorDate: Fri Dec 14 19:42:51 2018 +0200

    DRILL-6894: CTAS and CTTAS are not working on S3 storage when cache is disabled
    
    - provided JsonRecordWriter, ParquetRecordWriter and DrillTextRecordWriter with file system configuration
    closes #1576
---
 .../org/apache/drill/exec/store/easy/json/JSONFormatPlugin.java  | 9 ++-------
 .../org/apache/drill/exec/store/easy/json/JsonRecordWriter.java  | 9 +++++----
 .../org/apache/drill/exec/store/easy/text/TextFormatPlugin.java  | 9 ++-------
 .../org/apache/drill/exec/store/parquet/ParquetFormatPlugin.java | 3 ---
 .../org/apache/drill/exec/store/parquet/ParquetRecordWriter.java | 3 +--
 .../org/apache/drill/exec/store/text/DrillTextRecordWriter.java  | 9 +++++----
 6 files changed, 15 insertions(+), 27 deletions(-)

diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/store/easy/json/JSONFormatPlugin.java b/exec/java-exec/src/main/java/org/apache/drill/exec/store/easy/json/JSONFormatPlugin.java
index 721e800..11dc204 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/store/easy/json/JSONFormatPlugin.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/store/easy/json/JSONFormatPlugin.java
@@ -33,13 +33,11 @@ import org.apache.drill.exec.server.DrillbitContext;
 import org.apache.drill.exec.store.RecordReader;
 import org.apache.drill.exec.store.RecordWriter;
 import org.apache.drill.exec.store.dfs.DrillFileSystem;
-import org.apache.drill.exec.store.dfs.FileSystemConfig;
 import org.apache.drill.exec.store.dfs.easy.EasyFormatPlugin;
 import org.apache.drill.exec.store.dfs.easy.EasyWriter;
 import org.apache.drill.exec.store.dfs.easy.FileWork;
 import org.apache.drill.exec.store.easy.json.JSONFormatPlugin.JSONFormatConfig;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
 
 import com.fasterxml.jackson.annotation.JsonInclude;
 import com.fasterxml.jackson.annotation.JsonTypeName;
@@ -72,20 +70,17 @@ public class JSONFormatPlugin extends EasyFormatPlugin<JSONFormatConfig> {
     Map<String, String> options = new HashMap<>();
 
     options.put("location", writer.getLocation());
-
     FragmentHandle handle = context.getHandle();
     String fragmentId = String.format("%d_%d", handle.getMajorFragmentId(), handle.getMinorFragmentId());
     options.put("prefix", fragmentId);
-
     options.put("separator", " ");
-    options.put(FileSystem.FS_DEFAULT_NAME_KEY, ((FileSystemConfig) writer.getStorageConfig()).getConnection());
-
     options.put("extension", "json");
     options.put("extended", Boolean.toString(context.getOptions().getOption(ExecConstants.JSON_EXTENDED_TYPES)));
     options.put("uglify", Boolean.toString(context.getOptions().getOption(ExecConstants.JSON_WRITER_UGLIFY)));
     options.put("skipnulls", Boolean.toString(context.getOptions().getOption(ExecConstants.JSON_WRITER_SKIPNULLFIELDS)));
     options.put("enableNanInf", Boolean.toString(context.getOptions().getOption(ExecConstants.JSON_WRITER_NAN_INF_NUMBERS_VALIDATOR)));
-    RecordWriter recordWriter = new JsonRecordWriter(writer.getStorageStrategy());
+
+    RecordWriter recordWriter = new JsonRecordWriter(writer.getStorageStrategy(), getFsConf());
     recordWriter.init(options);
 
     return recordWriter;
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/store/easy/json/JsonRecordWriter.java b/exec/java-exec/src/main/java/org/apache/drill/exec/store/easy/json/JsonRecordWriter.java
index 9e6aaf8..2e80b3f 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/store/easy/json/JsonRecordWriter.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/store/easy/json/JsonRecordWriter.java
@@ -64,8 +64,11 @@ public class JsonRecordWriter extends JSONOutputRecordWriter implements RecordWr
   // Record write status
   private boolean fRecordStarted = false; // true once the startRecord() is called until endRecord() is called
 
-  public JsonRecordWriter(StorageStrategy storageStrategy){
+  private Configuration fsConf;
+
+  public JsonRecordWriter(StorageStrategy storageStrategy, Configuration fsConf) {
     this.storageStrategy = storageStrategy == null ? StorageStrategy.DEFAULT : storageStrategy;
+    this.fsConf = new Configuration(fsConf);
   }
 
   @Override
@@ -78,9 +81,7 @@ public class JsonRecordWriter extends JSONOutputRecordWriter implements RecordWr
     this.skipNullFields = Boolean.parseBoolean(writerOptions.get("skipnulls"));
     final boolean uglify = Boolean.parseBoolean(writerOptions.get("uglify"));
 
-    Configuration conf = new Configuration();
-    conf.set(FileSystem.FS_DEFAULT_NAME_KEY, writerOptions.get(FileSystem.FS_DEFAULT_NAME_KEY));
-    this.fs = FileSystem.get(conf);
+    this.fs = FileSystem.get(fsConf);
 
     Path fileName = new Path(location, prefix + "_" + index + "." + extension);
     try {
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/store/easy/text/TextFormatPlugin.java b/exec/java-exec/src/main/java/org/apache/drill/exec/store/easy/text/TextFormatPlugin.java
index bc129ae..2ac24d8 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/store/easy/text/TextFormatPlugin.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/store/easy/text/TextFormatPlugin.java
@@ -39,7 +39,6 @@ import org.apache.drill.exec.store.RecordReader;
 import org.apache.drill.exec.store.RecordWriter;
 import org.apache.drill.exec.store.dfs.DrillFileSystem;
 import org.apache.drill.exec.store.dfs.FileSelection;
-import org.apache.drill.exec.store.dfs.FileSystemConfig;
 import org.apache.drill.exec.store.dfs.easy.EasyFormatPlugin;
 import org.apache.drill.exec.store.dfs.easy.EasyGroupScan;
 import org.apache.drill.exec.store.dfs.easy.EasyWriter;
@@ -50,7 +49,6 @@ import org.apache.drill.exec.store.schedule.CompleteFileWork;
 import org.apache.drill.exec.store.text.DrillTextRecordReader;
 import org.apache.drill.exec.store.text.DrillTextRecordWriter;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.mapred.FileSplit;
 
@@ -117,17 +115,14 @@ public class TextFormatPlugin extends EasyFormatPlugin<TextFormatPlugin.TextForm
     final Map<String, String> options = new HashMap<>();
 
     options.put("location", writer.getLocation());
-
     FragmentHandle handle = context.getHandle();
     String fragmentId = String.format("%d_%d", handle.getMajorFragmentId(), handle.getMinorFragmentId());
     options.put("prefix", fragmentId);
-
     options.put("separator", getConfig().getFieldDelimiterAsString());
-    options.put(FileSystem.FS_DEFAULT_NAME_KEY, ((FileSystemConfig) writer.getStorageConfig()).getConnection());
-
     options.put("extension", getConfig().getExtensions().get(0));
 
-    RecordWriter recordWriter = new DrillTextRecordWriter(context.getAllocator(), writer.getStorageStrategy());
+    RecordWriter recordWriter = new DrillTextRecordWriter(
+        context.getAllocator(), writer.getStorageStrategy(), writer.getFormatPlugin().getFsConf());
     recordWriter.init(options);
 
     return recordWriter;
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/store/parquet/ParquetFormatPlugin.java b/exec/java-exec/src/main/java/org/apache/drill/exec/store/parquet/ParquetFormatPlugin.java
index 2c40996..f46cc1c 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/store/parquet/ParquetFormatPlugin.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/store/parquet/ParquetFormatPlugin.java
@@ -47,7 +47,6 @@ import org.apache.drill.exec.store.StoragePluginOptimizerRule;
 import org.apache.drill.exec.store.dfs.BasicFormatMatcher;
 import org.apache.drill.exec.store.dfs.DrillFileSystem;
 import org.apache.drill.exec.store.dfs.FileSelection;
-import org.apache.drill.exec.store.dfs.FileSystemConfig;
 import org.apache.drill.exec.store.dfs.FileSystemPlugin;
 import org.apache.drill.exec.store.parquet.metadata.Metadata;
 import org.apache.drill.exec.util.DrillFileSystemUtil;
@@ -140,8 +139,6 @@ public class ParquetFormatPlugin implements FormatPlugin {
     String fragmentId = String.format("%d_%d", handle.getMajorFragmentId(), handle.getMinorFragmentId());
     options.put("prefix", fragmentId);
 
-    options.put(FileSystem.FS_DEFAULT_NAME_KEY, ((FileSystemConfig) writer.getStorageConfig()).getConnection());
-
     options.put(ExecConstants.PARQUET_BLOCK_SIZE, context.getOptions().getOption(ExecConstants.PARQUET_BLOCK_SIZE).num_val.toString());
     options.put(ExecConstants.PARQUET_WRITER_USE_SINGLE_FS_BLOCK,
       context.getOptions().getOption(ExecConstants.PARQUET_WRITER_USE_SINGLE_FS_BLOCK).bool_val.toString());
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/store/parquet/ParquetRecordWriter.java b/exec/java-exec/src/main/java/org/apache/drill/exec/store/parquet/ParquetRecordWriter.java
index 45233c4..5a64f40 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/store/parquet/ParquetRecordWriter.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/store/parquet/ParquetRecordWriter.java
@@ -132,6 +132,7 @@ public class ParquetRecordWriter extends ParquetOutputRecordWriter {
     this.extraMetaData.put(WRITER_VERSION_PROPERTY, String.valueOf(ParquetWriter.WRITER_VERSION));
     this.storageStrategy = writer.getStorageStrategy() == null ? StorageStrategy.DEFAULT : writer.getStorageStrategy();
     this.cleanUpLocations = Lists.newArrayList();
+    this.conf = new Configuration(writer.getFormatPlugin().getFsConf());
   }
 
   @Override
@@ -139,8 +140,6 @@ public class ParquetRecordWriter extends ParquetOutputRecordWriter {
     this.location = writerOptions.get("location");
     this.prefix = writerOptions.get("prefix");
 
-    conf = new Configuration();
-    conf.set(FileSystem.FS_DEFAULT_NAME_KEY, writerOptions.get(FileSystem.FS_DEFAULT_NAME_KEY));
     fs = FileSystem.get(conf);
     blockSize = Integer.parseInt(writerOptions.get(ExecConstants.PARQUET_BLOCK_SIZE));
     pageSize = Integer.parseInt(writerOptions.get(ExecConstants.PARQUET_PAGE_SIZE));
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/store/text/DrillTextRecordWriter.java b/exec/java-exec/src/main/java/org/apache/drill/exec/store/text/DrillTextRecordWriter.java
index 7b7c47f..83a00bd 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/store/text/DrillTextRecordWriter.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/store/text/DrillTextRecordWriter.java
@@ -56,9 +56,12 @@ public class DrillTextRecordWriter extends StringOutputRecordWriter {
   private boolean fRecordStarted = false; // true once the startRecord() is called until endRecord() is called
   private StringBuilder currentRecord; // contains the current record separated by field delimiter
 
-  public DrillTextRecordWriter(BufferAllocator allocator, StorageStrategy storageStrategy) {
+  private Configuration fsConf;
+
+  public DrillTextRecordWriter(BufferAllocator allocator, StorageStrategy storageStrategy, Configuration fsConf) {
     super(allocator);
     this.storageStrategy = storageStrategy == null ? StorageStrategy.DEFAULT : storageStrategy;
+    this.fsConf = new Configuration(fsConf);
   }
 
   @Override
@@ -68,9 +71,7 @@ public class DrillTextRecordWriter extends StringOutputRecordWriter {
     this.fieldDelimiter = writerOptions.get("separator");
     this.extension = writerOptions.get("extension");
 
-    Configuration conf = new Configuration();
-    conf.set(FileSystem.FS_DEFAULT_NAME_KEY, writerOptions.get(FileSystem.FS_DEFAULT_NAME_KEY));
-    this.fs = FileSystem.get(conf);
+    this.fs = FileSystem.get(fsConf);
 
     this.currentRecord = new StringBuilder();
     this.index = 0;

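The hunks above all follow one pattern: instead of rebuilding a Hadoop Configuration from the fs.default.name entry in the writer options map, each record writer now receives the format plugin's file system Configuration directly. A minimal, hypothetical sketch of that pattern (ConfiguredRecordWriter is an illustrative name, not a Drill class):

    import java.io.IOException;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;

    // Illustrative only: a writer that is handed the plugin's Configuration at
    // construction time rather than re-deriving it from writer options.
    public class ConfiguredRecordWriter {
      private final Configuration fsConf;
      private FileSystem fs;

      public ConfiguredRecordWriter(Configuration fsConf) {
        // Defensive copy, so per-writer changes cannot leak back into the plugin.
        this.fsConf = new Configuration(fsConf);
      }

      public void init() throws IOException {
        // The FileSystem is resolved from the injected configuration, so S3,
        // HDFS or the local file system all work without an explicit
        // fs.default.name entry in the writer options.
        this.fs = FileSystem.get(fsConf);
      }
    }

Passing the configuration through also keeps any plugin-level settings (for example S3 connection properties) visible to the writer, which appears to be the point of this part of the change.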

[drill] 02/10: DRILL-6934: Update the option documentation for planner.enable_unnest_lateral closes #1587

Posted by gp...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

gparai pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/drill.git

commit 922beabc50f6b7179a7c5fb71d7fd45073f1ec70
Author: Sorabh Hamirwasia <sh...@maprtech.com>
AuthorDate: Thu Dec 27 13:09:35 2018 -0800

    DRILL-6934: Update the option documentation for planner.enable_unnest_lateral
    closes #1587
---
 .../java/org/apache/drill/exec/planner/physical/PlannerSettings.java    | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/physical/PlannerSettings.java b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/physical/PlannerSettings.java
index 69b9e3d..95d77fa 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/physical/PlannerSettings.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/physical/PlannerSettings.java
@@ -196,7 +196,7 @@ public class PlannerSettings implements Context{
    */
   public static final String ENABLE_UNNEST_LATERAL_KEY = "planner.enable_unnest_lateral";
   public static final BooleanValidator ENABLE_UNNEST_LATERAL = new BooleanValidator(ENABLE_UNNEST_LATERAL_KEY,
-      new OptionDescription("Enables lateral join functionality. Default is false. (Drill 1.14+)"));
+      new OptionDescription("Enables lateral join functionality. Default is true. (Drill 1.15+)"));
 
   /*
      Enables rules that re-write query joins in the most optimal way.


[drill] 01/10: DRILL-6936: TestGracefulShutdown.gracefulShutdownThreadShouldBeInitializedBeforeClosingDrillbit fails if loopback address is set in hosts closes #1589

Posted by gp...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

gparai pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/drill.git

commit d96bea54b96b01ffe9bb19c76bfc158784f26ffc
Author: Igor Guzenko <ih...@gmail.com>
AuthorDate: Fri Dec 28 21:04:42 2018 +0200

    DRILL-6936: TestGracefulShutdown.gracefulShutdownThreadShouldBeInitializedBeforeClosingDrillbit fails if loopback address is set in hosts
    closes #1589
---
 .../apache/drill/test/TestGracefulShutdown.java    | 46 +++++++++-------------
 1 file changed, 18 insertions(+), 28 deletions(-)

diff --git a/exec/java-exec/src/test/java/org/apache/drill/test/TestGracefulShutdown.java b/exec/java-exec/src/test/java/org/apache/drill/test/TestGracefulShutdown.java
index 94180e2..a433237 100644
--- a/exec/java-exec/src/test/java/org/apache/drill/test/TestGracefulShutdown.java
+++ b/exec/java-exec/src/test/java/org/apache/drill/test/TestGracefulShutdown.java
@@ -16,6 +16,7 @@
  * limitations under the License.
  */
 package org.apache.drill.test;
+
 import org.apache.drill.categories.SlowTest;
 import org.apache.drill.common.exceptions.UserException;
 import org.apache.drill.exec.ExecConstants;
@@ -179,40 +180,28 @@ public class TestGracefulShutdown extends BaseTestQuery {
   }
 
   @Test // DRILL-6912
-  public void gracefulShutdownThreadShouldBeInitializedBeforeClosingDrillbit() throws Exception {
-    Drillbit drillbit = null;
-    Drillbit drillbitWithSamePort = null;
-
-    int userPort = QueryTestUtil.getFreePortNumber(31170, 300);
-    int bitPort = QueryTestUtil.getFreePortNumber(31180, 300);
+  public void testDrillbitWithSamePortContainsShutdownThread() throws Exception {
     ClusterFixtureBuilder fixtureBuilder = ClusterFixture.bareBuilder(dirTestWatcher).withLocalZk()
-        .configProperty(ExecConstants.INITIAL_USER_PORT, userPort)
-        .configProperty(ExecConstants.INITIAL_BIT_PORT, bitPort);
-    try (ClusterFixture clusterFixture = fixtureBuilder.build()) {
-      drillbit = clusterFixture.drillbit();
-
-      // creating another drillbit instance using same config
-      drillbitWithSamePort = new Drillbit(clusterFixture.config(), fixtureBuilder.configBuilder().getDefinitions(),
-          clusterFixture.serviceSet());
-
+        .configProperty(ExecConstants.ALLOW_LOOPBACK_ADDRESS_BINDING, true)
+        .configProperty(ExecConstants.INITIAL_USER_PORT, QueryTestUtil.getFreePortNumber(31170, 300))
+        .configProperty(ExecConstants.INITIAL_BIT_PORT, QueryTestUtil.getFreePortNumber(31180, 300));
+
+    try (ClusterFixture fixture = fixtureBuilder.build();
+         Drillbit drillbitWithSamePort = new Drillbit(fixture.config(),
+             fixtureBuilder.configBuilder().getDefinitions(), fixture.serviceSet())) {
+      // Assert preconditions :
+      //      1. First drillbit instance should be started normally
+      //      2. Second instance startup should fail, because ports are occupied by the first one
+      assertNotNull("First drillbit instance should be initialized", fixture.drillbit());
       try {
         drillbitWithSamePort.run();
-        fail("drillbitWithSamePort.run() should throw UserException");
+        fail("Invocation of 'drillbitWithSamePort.run()' should throw UserException");
       } catch (UserException e) {
-        // it's expected that second drillbit can't be started because port is busy
         assertThat(e.getMessage(), containsString("RESOURCE ERROR: Drillbit could not bind to port"));
+        // Ensure that drillbit with failed startup may be safely closed
+        assertNotNull("Drillbit.gracefulShutdownThread shouldn't be null, otherwise close() may throw NPE (if so, check suppressed exception).",
+            drillbitWithSamePort.getGracefulShutdownThread());
       }
-    } finally {
-      // preconditions
-      assertNotNull(drillbit);
-      assertNotNull(drillbitWithSamePort);
-      assertNotNull("gracefulShutdownThread should be initialized, otherwise NPE will be thrown from close()",
-          drillbit.getGracefulShutdownThread());
-      // main test case
-      assertNotNull("gracefulShutdownThread should be initialized, otherwise NPE will be thrown from close()",
-          drillbitWithSamePort.getGracefulShutdownThread());
-      drillbit.close();
-      drillbitWithSamePort.close();
     }
   }
 
@@ -245,4 +234,5 @@ public class TestGracefulShutdown extends BaseTestQuery {
       fail(e.getMessage());
     }
   }
+
 }

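The reworked test relies on everything close() touches being initialized before the step that can fail (binding the ports), so a Drillbit whose run() threw can still be closed safely by try-with-resources. A small, generic sketch of that idiom (PortBoundService is a made-up class, not Drill code):

    import java.io.IOException;
    import java.net.ServerSocket;

    // Illustrative only: fields needed by close() are created eagerly, so the
    // object is safe to close even when start() fails with a bind error.
    public class PortBoundService implements AutoCloseable {
      private final Thread shutdownThread = new Thread(() -> { }, "graceful-shutdown");
      private ServerSocket socket;          // stays null if binding fails

      public void start(int port) throws IOException {
        socket = new ServerSocket(port);    // may throw if the port is busy
      }

      @Override
      public void close() throws IOException {
        shutdownThread.interrupt();         // never null, so no NPE here
        if (socket != null) {
          socket.close();
        }
      }

      public static void main(String[] args) throws Exception {
        try (PortBoundService first = new PortBoundService();
             PortBoundService second = new PortBoundService()) {
          first.start(31170);
          second.start(31170);              // fails: the port is already taken
        } catch (IOException expected) {
          // both resources were already closed without an NPE before we got here
        }
      }
    }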

[drill] 05/10: DRILL-6921: Add Clear button for /options filter

Posted by gp...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

gparai pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/drill.git

commit 8a85879507866a83293812205ceab2591391c590
Author: Kunal Khatua <kk...@maprtech.com>
AuthorDate: Tue Jan 1 10:10:07 2019 -0800

    DRILL-6921: Add Clear button for /options filter
    
    This commit adds the following search enhancements:
    1. Addition of a clear ('x') button in the search field
    2. If a filter term has been entered in the search box, clicking any option's Update or Reset-to-Default button reloads the page with that filter term re-applied.
    3. If the search box is empty, clicking any option's Update or Reset-to-Default button reloads the page filtered down to the updated/reset option, so the changed value is visible.
    4. Customization of the quick search terms (defaults are defined in drill-module.conf).
    closes #1588
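
Points 2 and 3 reduce to a single rule for the post-update redirect URL: keep the user's filter if one was typed, otherwise filter on the option that was just changed. (Point 4 is backed by the new drill.exec.http.web.options.filters entry, which deployments can presumably override in drill-override.conf.) A hypothetical Java rendering of that rule follows; the actual implementation is the options.ftl JavaScript shown further below.

    // Hypothetical helper, not Drill source: mirrors the redirect rule from
    // points 2 and 3 of the commit message.
    public class OptionsFilterRedirect {

      static String redirectHref(String currentHref, String searchBoxValue, String updatedOption) {
        // Strip any existing ?filter=... / &filter=... suffix from the current URL.
        String base = currentHref.replaceAll("[?&]filter=.*$", "");
        // Prefer the term the user typed; fall back to the option just updated/reset.
        String filter = (searchBoxValue != null && !searchBoxValue.trim().isEmpty())
            ? searchBoxValue.trim()
            : updatedOption;
        return base + "?filter=" + filter;
      }

      public static void main(String[] args) {
        // Search box empty: reload filtered to the option that was just changed.
        System.out.println(redirectHref("http://localhost:8047/options", "", "planner.enable_hashjoin"));
        // Search box holds "parquet": reload with the same filter re-applied.
        System.out.println(redirectHref("http://localhost:8047/options?filter=store", "parquet", "planner.enable_hashjoin"));
      }
    }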
---
 .../java/org/apache/drill/exec/ExecConstants.java  |  2 +
 .../drill/exec/server/rest/StatusResources.java    | 55 ++++++++++++++++++----
 .../java-exec/src/main/resources/drill-module.conf |  3 +-
 exec/java-exec/src/main/resources/rest/options.ftl | 50 ++++++++++++++------
 4 files changed, 86 insertions(+), 24 deletions(-)

diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/ExecConstants.java b/exec/java-exec/src/main/java/org/apache/drill/exec/ExecConstants.java
index afa29d9..77cfb9f 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/ExecConstants.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/ExecConstants.java
@@ -224,6 +224,8 @@ public final class ExecConstants {
   public static final String HTTP_AUTHENTICATION_MECHANISMS = "drill.exec.http.auth.mechanisms";
   public static final String HTTP_SPNEGO_PRINCIPAL = "drill.exec.http.auth.spnego.principal";
   public static final String HTTP_SPNEGO_KEYTAB = "drill.exec.http.auth.spnego.keytab";
+  //Customize filters in options
+  public static final String HTTP_WEB_OPTIONS_FILTERS = "drill.exec.http.web.options.filters";
   public static final String SYS_STORE_PROVIDER_CLASS = "drill.exec.sys.store.provider.class";
   public static final String SYS_STORE_PROVIDER_LOCAL_PATH = "drill.exec.sys.store.provider.local.path";
   public static final String SYS_STORE_PROVIDER_LOCAL_ENABLE_WRITE = "drill.exec.sys.store.provider.local.write";
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/server/rest/StatusResources.java b/exec/java-exec/src/main/java/org/apache/drill/exec/server/rest/StatusResources.java
index 1ebef31..9123b65 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/server/rest/StatusResources.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/server/rest/StatusResources.java
@@ -17,6 +17,7 @@
  */
 package org.apache.drill.exec.server.rest;
 
+import java.util.ArrayList;
 import java.util.Collections;
 import java.util.Comparator;
 import java.util.LinkedList;
@@ -31,13 +32,16 @@ import javax.ws.rs.GET;
 import javax.ws.rs.POST;
 import javax.ws.rs.Path;
 import javax.ws.rs.Produces;
+import javax.ws.rs.core.Context;
 import javax.ws.rs.core.MediaType;
 import javax.ws.rs.core.SecurityContext;
+import javax.ws.rs.core.UriInfo;
 import javax.xml.bind.annotation.XmlRootElement;
 
 import com.fasterxml.jackson.annotation.JsonProperty;
 import org.apache.commons.lang3.tuple.ImmutablePair;
 import org.apache.commons.lang3.tuple.Pair;
+import org.apache.drill.exec.ExecConstants;
 import org.apache.drill.exec.server.options.OptionList;
 import org.apache.drill.exec.server.options.OptionManager;
 import org.apache.drill.exec.server.options.OptionValue;
@@ -63,6 +67,8 @@ public class StatusResources {
   public static final String PATH_INTERNAL_OPTIONS_JSON = "/internal_options" + REST_API_SUFFIX;
   public static final String PATH_OPTIONS = "/options";
   public static final String PATH_INTERNAL_OPTIONS = "/internal_options";
+  //Used to access current filter state in WebUI
+  private static final String CURRENT_FILTER_PARAM = "filter";
 
   @Inject UserAuthEnabled authEnabled;
   @Inject WorkManager work;
@@ -118,27 +124,35 @@ public class StatusResources {
     return getSystemOptionsJSONHelper(true);
   }
 
-  private Viewable getSystemOptionsHelper(boolean internal) {
+  //Generate model-view for WebUI (PATH_OPTIONS and PATH_INTERNAL_OPTIONS)
+  private Viewable getSystemOptionsHelper(boolean internal, UriInfo uriInfo) {
+    List<OptionWrapper> options = getSystemOptionsJSONHelper(internal);
+    List<String> fltrList = new ArrayList<>(work.getContext().getConfig().getStringList(ExecConstants.HTTP_WEB_OPTIONS_FILTERS));
+    String currFilter = (uriInfo != null) ? uriInfo.getQueryParameters().getFirst(CURRENT_FILTER_PARAM) : null;
+    if (currFilter == null) {
+      currFilter = "";
+    }
+
     return ViewableWithPermissions.create(authEnabled.get(),
       "/rest/options.ftl",
       sc,
-      getSystemOptionsJSONHelper(internal));
+      new OptionsListing(options, fltrList, currFilter));
   }
 
   @GET
   @Path(StatusResources.PATH_OPTIONS)
   @RolesAllowed(DrillUserPrincipal.AUTHENTICATED_ROLE)
   @Produces(MediaType.TEXT_HTML)
-  public Viewable getSystemPublicOptions() {
-    return getSystemOptionsHelper(false);
+  public Viewable getSystemPublicOptions(@Context UriInfo uriInfo) {
+    return getSystemOptionsHelper(false, uriInfo);
   }
 
   @GET
   @Path(StatusResources.PATH_INTERNAL_OPTIONS)
   @RolesAllowed(DrillUserPrincipal.AUTHENTICATED_ROLE)
   @Produces(MediaType.TEXT_HTML)
-  public Viewable getSystemInternalOptions() {
-    return getSystemOptionsHelper(true);
+  public Viewable getSystemInternalOptions(@Context UriInfo uriInfo) {
+    return getSystemOptionsHelper(true, uriInfo);
   }
 
   @SuppressWarnings("resource")
@@ -160,9 +174,34 @@ public class StatusResources {
     }
 
     if (optionManager.getOptionDefinition(name).getMetaData().isInternal()) {
-      return getSystemInternalOptions();
+      return getSystemInternalOptions(null);
     } else {
-      return getSystemPublicOptions();
+      return getSystemPublicOptions(null);
+    }
+  }
+
+  /**
+   * Data Model for rendering /options on webUI
+   */
+  public static class OptionsListing {
+    private final List<OptionWrapper> options;
+    private final List<String> filters;
+    private final String dynamicFilter;
+
+    public OptionsListing(List<OptionWrapper> optList, List<String> fltrList, String currFilter) {
+      this.options = optList;
+      this.filters = fltrList;
+      this.dynamicFilter = currFilter;
+    }
+
+    public List<OptionWrapper> getOptions() {
+      return options;
+    }
+    public List<String> getFilters() {
+      return filters;
+    }
+    public String getDynamicFilter() {
+      return dynamicFilter;
     }
   }
 
diff --git a/exec/java-exec/src/main/resources/drill-module.conf b/exec/java-exec/src/main/resources/drill-module.conf
index 3682a85..e792b20 100644
--- a/exec/java-exec/src/main/resources/drill-module.conf
+++ b/exec/java-exec/src/main/resources/drill-module.conf
@@ -155,7 +155,8 @@ drill.exec: {
             reservation: 0,
             maximum: 9223372036854775807
         }
-    }
+    },
+    web.options.filters: ["planner", "store", "parquet", "hashagg", "hashjoin"]
   },
   //setting javax variables for ssl configurations is being deprecated.
   ssl: {
diff --git a/exec/java-exec/src/main/resources/rest/options.ftl b/exec/java-exec/src/main/resources/rest/options.ftl
index a8e03fa..1fce03c 100644
--- a/exec/java-exec/src/main/resources/rest/options.ftl
+++ b/exec/java-exec/src/main/resources/rest/options.ftl
@@ -24,8 +24,18 @@
     <script>
     //Alter System Values
     function alterSysOption(optionName, optionValue, optionKind) {
+        var currHref = location.href;
+        var redirectHref = currHref.replace(/(.?filter=).*/,"");
+        //Read filter value and apply to reload with new filter
+        var reApplyFilter = $("#searchBox").val();
+        if (reApplyFilter != null && reApplyFilter.trim().length > 0) {
+            redirectHref = redirectHref + "?filter=" + reApplyFilter.trim();
+        } else { //Apply filter for updated field
+            redirectHref = redirectHref + "?filter=" + optionName;
+        }
         $.post("/option/"+optionName, {kind: optionKind, name: optionName, value: optionValue}, function () {
-            location.reload(true);
+            //Remove existing filters
+            location.href=redirectHref;
         });
     }
 
@@ -71,16 +81,19 @@ table.sortable thead .sorting_desc { background-image: url("/static/img/black-de
 <#macro page_body>
   <div class="page-header">
   </div>
-  <div class="btn-group btn-group-sm" style="display:inline-block;">
-  <button type="button" class="btn" style="cursor:default;font-weight:bold;" > Quick Filters </button>
-  <button type="button" class="btn btn-info" onclick="inject(this.innerHTML);">planner</button>
-  <button type="button" class="btn btn-info" onclick="inject(this.innerHTML);">store</button>
-  <button type="button" class="btn btn-info" onclick="inject(this.innerHTML);">parquet</button>
-  <button type="button" class="btn btn-info" onclick="inject(this.innerHTML);">hashagg</button>
-  <button type="button" class="btn btn-info" onclick="inject(this.innerHTML);">hashjoin</button>
-  </div>
   <div class="col-xs-4">
-  <input id="searchBox"  name="searchBox" class="form-control" type="text" value="" placeholder="Search options...">
+    <div class="input-group input-sm" >
+      <input id="searchBox" name="searchBox" class="form-control" type="text" value="" placeholder="Search options...">
+        <div class="input-group-btn">
+          <button class="btn btn-default" type="button" onclick="$('#searchBox').val('').focus();" title="Clear search" style="font-weight:bold">&times;</button>
+        </div> 
+    </div>
+  </div>
+  <div class="btn-group btn-group-sm" style="padding-top:0.5%;">
+  <button type="button" class="btn" style="cursor:default;font-weight:bold;" > Quick Filters </button>
+  <#list model.getFilters() as filter>
+  <button type="button" class="btn btn-info" onclick="inject(this.innerHTML);">${filter}</button>
+  </#list>
   </div>
 
   <div class="table-responsive">
@@ -92,9 +105,7 @@ table.sortable thead .sorting_desc { background-image: url("/static/img/black-de
           <th style="width:45%">DESCRIPTION</th>
         </tr>
       </thead>
-      <tbody>
-        <#assign i = 1>
-        <#list model as option>
+      <tbody><#assign i = 1><#list model.getOptions() as option>
           <tr id="row-${i}">
             <td style="font-family:Courier New; vertical-align:middle" id='optionName'>${option.getName()}</td>
             <td>
@@ -142,7 +153,7 @@ table.sortable thead .sorting_desc { background-image: url("/static/img/black-de
             "infoEmpty": "No options available",
             "infoFiltered": ""
         }
-      } );
+      });
 
     //Draw when the table is ready
     $(document).ready(function() {
@@ -156,10 +167,19 @@ table.sortable thead .sorting_desc { background-image: url("/static/img/black-de
 
       // Draw DataTable
       optTable.rows().invalidate().draw();
+
+      //Re-Inject Filter keyword here
+      let explicitFltr = "";
+      if (window.location.search.indexOf("filter=") >= 1) {
+        //Select 1st occurrence (Chrome accepts 1st of duplicates)
+        let kvPair=window.location.search.substr(1).split('&')[0];
+        explicitFltr=kvPair.split('=')[1]
+        inject(explicitFltr);
+      }
     });
 
     //EventListener to update table when changes are detected
-    $('#searchBox').on('keyup change', function () {
+    $('#searchBox').on('keyup focus change', function () {
       optTable.search(this.value).draw().toString();
     });