Posted to commits@hive.apache.org by se...@apache.org on 2016/09/12 20:24:38 UTC

[06/31] hive git commit: HIVE-14217: Druid integration (Jesus Camacho Rodriguez, reviewed by Ashutosh Chauhan)

http://git-wip-us.apache.org/repos/asf/hive/blob/58d1befa/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/SqlFunctionConverter.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/SqlFunctionConverter.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/SqlFunctionConverter.java
index 8b08ae7..911b86b 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/SqlFunctionConverter.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/SqlFunctionConverter.java
@@ -48,6 +48,7 @@ import org.apache.hadoop.hive.ql.optimizer.calcite.functions.HiveSqlCountAggFunc
 import org.apache.hadoop.hive.ql.optimizer.calcite.functions.HiveSqlMinMaxAggFunction;
 import org.apache.hadoop.hive.ql.optimizer.calcite.functions.HiveSqlSumAggFunction;
 import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveBetween;
+import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveDateGranularity;
 import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveIn;
 import org.apache.hadoop.hive.ql.parse.ASTNode;
 import org.apache.hadoop.hive.ql.parse.HiveParser;
@@ -206,6 +207,7 @@ public class SqlFunctionConverter {
         case IS_NOT_NULL:
         case IS_NULL:
         case CASE:
+        case OTHER_FUNCTION:
           node = (ASTNode) ParseDriver.adaptor.create(HiveParser.TOK_FUNCTION, "TOK_FUNCTION");
           node.addChild((ASTNode) ParseDriver.adaptor.create(hToken.type, hToken.text));
           break;
@@ -311,8 +313,8 @@ public class SqlFunctionConverter {
       registerFunction("+", SqlStdOperatorTable.PLUS, hToken(HiveParser.PLUS, "+"));
       registerFunction("-", SqlStdOperatorTable.MINUS, hToken(HiveParser.MINUS, "-"));
       registerFunction("*", SqlStdOperatorTable.MULTIPLY, hToken(HiveParser.STAR, "*"));
-      registerFunction("/", SqlStdOperatorTable.DIVIDE, hToken(HiveParser.STAR, "/"));
-      registerFunction("%", SqlStdOperatorTable.MOD, hToken(HiveParser.STAR, "%"));
+      registerFunction("/", SqlStdOperatorTable.DIVIDE, hToken(HiveParser.DIVIDE, "/"));
+      registerFunction("%", SqlStdOperatorTable.MOD, hToken(HiveParser.Identifier, "%"));
       registerFunction("and", SqlStdOperatorTable.AND, hToken(HiveParser.KW_AND, "and"));
       registerFunction("or", SqlStdOperatorTable.OR, hToken(HiveParser.KW_OR, "or"));
       registerFunction("=", SqlStdOperatorTable.EQUALS, hToken(HiveParser.EQUAL, "="));
@@ -334,6 +336,23 @@ public class SqlFunctionConverter {
       registerFunction("isnull", SqlStdOperatorTable.IS_NULL, hToken(HiveParser.TOK_ISNULL, "TOK_ISNULL"));
       registerFunction("when", SqlStdOperatorTable.CASE, hToken(HiveParser.Identifier, "when"));
       registerDuplicateFunction("case", SqlStdOperatorTable.CASE, hToken(HiveParser.Identifier, "when"));
+      // time-based granularity functions
+      registerFunction("floor_year", HiveDateGranularity.YEAR,
+          hToken(HiveParser.Identifier, "floor_year"));
+      registerFunction("floor_quarter", HiveDateGranularity.QUARTER,
+          hToken(HiveParser.Identifier, "floor_quarter"));
+      registerFunction("floor_month", HiveDateGranularity.MONTH,
+          hToken(HiveParser.Identifier, "floor_month"));
+      registerFunction("floor_week", HiveDateGranularity.WEEK,
+          hToken(HiveParser.Identifier, "floor_week"));
+      registerFunction("floor_day", HiveDateGranularity.DAY,
+          hToken(HiveParser.Identifier, "floor_day"));
+      registerFunction("floor_hour", HiveDateGranularity.HOUR,
+          hToken(HiveParser.Identifier, "floor_hour"));
+      registerFunction("floor_minute", HiveDateGranularity.MINUTE,
+          hToken(HiveParser.Identifier, "floor_minute"));
+      registerFunction("floor_second", HiveDateGranularity.SECOND,
+          hToken(HiveParser.Identifier, "floor_second"));
     }
 
     private void registerFunction(String name, SqlOperator calciteFn, HiveToken hiveToken) {

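The hunk above registers the new floor_year .. floor_second functions so the CBO can translate them between Hive's AST and Calcite operators. Below is a minimal, self-contained sketch of that bidirectional bookkeeping; the class, field, and token names are hypothetical stand-ins for the maps SqlFunctionConverter keeps internally, not the actual implementation.

import java.util.HashMap;
import java.util.Map;

// Hypothetical, simplified stand-in for the registration performed above: one lookup from
// the Hive function name to the Calcite operator, and a reverse lookup from the operator
// to the Hive parser token emitted when the optimized plan is turned back into an AST.
public class FunctionRegistrationSketch {

  static final class HiveTokenSketch {
    final int type;
    final String text;
    HiveTokenSketch(int type, String text) { this.type = type; this.text = text; }
  }

  private final Map<String, String> hiveToCalcite = new HashMap<>();
  private final Map<String, HiveTokenSketch> calciteToHiveToken = new HashMap<>();

  void registerFunction(String name, String calciteOp, HiveTokenSketch hiveToken) {
    hiveToCalcite.put(name, calciteOp);
    calciteToHiveToken.put(calciteOp, hiveToken);
  }

  public static void main(String[] args) {
    FunctionRegistrationSketch registry = new FunctionRegistrationSketch();
    // mirrors registerFunction("floor_year", HiveDateGranularity.YEAR,
    //                          hToken(HiveParser.Identifier, "floor_year")) above
    registry.registerFunction("floor_year", "FLOOR_YEAR",
        new HiveTokenSketch(0 /* stand-in for HiveParser.Identifier */, "floor_year"));
    System.out.println(registry.hiveToCalcite.get("floor_year"));            // FLOOR_YEAR
    System.out.println(registry.calciteToHiveToken.get("FLOOR_YEAR").text);  // floor_year
  }
}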
http://git-wip-us.apache.org/repos/asf/hive/blob/58d1befa/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java
index 18a55cb..ff94160 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java
@@ -24,6 +24,7 @@ import java.lang.reflect.UndeclaredThrowableException;
 import java.math.BigDecimal;
 import java.util.AbstractMap.SimpleEntry;
 import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.BitSet;
 import java.util.Collections;
 import java.util.EnumSet;
@@ -61,6 +62,7 @@ import org.apache.calcite.rel.core.Aggregate;
 import org.apache.calcite.rel.core.AggregateCall;
 import org.apache.calcite.rel.core.Filter;
 import org.apache.calcite.rel.core.JoinRelType;
+import org.apache.calcite.rel.core.TableScan;
 import org.apache.calcite.rel.metadata.CachingRelMetadataProvider;
 import org.apache.calcite.rel.metadata.ChainedRelMetadataProvider;
 import org.apache.calcite.rel.metadata.RelMetadataProvider;
@@ -98,6 +100,7 @@ import org.apache.calcite.util.ImmutableBitSet;
 import org.apache.calcite.util.ImmutableIntList;
 import org.apache.calcite.util.Pair;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hive.conf.Constants;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
 import org.apache.hadoop.hive.conf.HiveConf.StrictChecks;
@@ -128,6 +131,11 @@ import org.apache.hadoop.hive.ql.optimizer.calcite.RelOptHiveTable;
 import org.apache.hadoop.hive.ql.optimizer.calcite.TraitsUtil;
 import org.apache.hadoop.hive.ql.optimizer.calcite.cost.HiveAlgorithmsConf;
 import org.apache.hadoop.hive.ql.optimizer.calcite.cost.HiveVolcanoPlanner;
+import org.apache.hadoop.hive.ql.optimizer.calcite.druid.DruidQuery;
+import org.apache.hadoop.hive.ql.optimizer.calcite.druid.DruidRules;
+import org.apache.hadoop.hive.ql.optimizer.calcite.druid.DruidSchema;
+import org.apache.hadoop.hive.ql.optimizer.calcite.druid.DruidTable;
+import org.apache.hadoop.hive.ql.optimizer.calcite.druid.HiveDruidConf;
 import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveAggregate;
 import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveFilter;
 import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveGroupingID;
@@ -208,6 +216,7 @@ import org.apache.hadoop.hive.serde2.typeinfo.ListTypeInfo;
 import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
 import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;
 import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils;
+import org.joda.time.Interval;
 
 import com.google.common.base.Function;
 import com.google.common.collect.ImmutableList;
@@ -931,8 +940,11 @@ public class CalcitePlanner extends SemanticAnalyzer {
       final Double maxMemory = (double) HiveConf.getLongVar(
               conf, HiveConf.ConfVars.HIVECONVERTJOINNOCONDITIONALTASKTHRESHOLD);
       HiveAlgorithmsConf algorithmsConf = new HiveAlgorithmsConf(maxSplitSize, maxMemory);
+      final int selectThreshold = (int) HiveConf.getIntVar(
+              conf, HiveConf.ConfVars.HIVE_DRUID_SELECT_THRESHOLD);
+      HiveDruidConf druidConf = new HiveDruidConf(selectThreshold);
       HiveRulesRegistry registry = new HiveRulesRegistry();
-      HivePlannerContext confContext = new HivePlannerContext(algorithmsConf, registry);
+      HivePlannerContext confContext = new HivePlannerContext(algorithmsConf, druidConf, registry);
       RelOptPlanner planner = HiveVolcanoPlanner.createPlanner(confContext);
       final RelOptQuery query = new RelOptQuery(planner);
       final RexBuilder rexBuilder = cluster.getRexBuilder();
@@ -1070,10 +1082,18 @@ public class CalcitePlanner extends SemanticAnalyzer {
         perfLogger.PerfLogEnd(this.getClass().getName(), PerfLogger.OPTIMIZER, "Calcite: Window fixing rule");
       }
 
-      // 8. Run rules to aid in translation from Calcite tree to Hive tree
+      // 8. Apply Druid transformation rules
+      perfLogger.PerfLogBegin(this.getClass().getName(), PerfLogger.OPTIMIZER);
+      calciteOptimizedPlan = hepPlan(calciteOptimizedPlan, false, mdProvider.getMetadataProvider(), null,
+              HepMatchOrder.BOTTOM_UP, DruidRules.FILTER, DruidRules.PROJECT_AGGREGATE,
+              DruidRules.PROJECT, DruidRules.AGGREGATE, DruidRules.PROJECT_SORT,
+              DruidRules.SORT, DruidRules.SORT_PROJECT);
+      perfLogger.PerfLogEnd(this.getClass().getName(), PerfLogger.OPTIMIZER, "Calcite: Druid transformation rules");
+
+      // 9. Run rules to aid in translation from Calcite tree to Hive tree
       if (HiveConf.getBoolVar(conf, ConfVars.HIVE_CBO_RETPATH_HIVEOP)) {
         perfLogger.PerfLogBegin(this.getClass().getName(), PerfLogger.OPTIMIZER);
-        // 8.1. Merge join into multijoin operators (if possible)
+        // 9.1. Merge join into multijoin operators (if possible)
         calciteOptimizedPlan = hepPlan(calciteOptimizedPlan, true, mdProvider.getMetadataProvider(), null,
                 HepMatchOrder.BOTTOM_UP, HiveJoinProjectTransposeRule.BOTH_PROJECT_INCLUDE_OUTER,
                 HiveJoinProjectTransposeRule.LEFT_PROJECT_INCLUDE_OUTER,
@@ -1091,7 +1111,7 @@ public class CalcitePlanner extends SemanticAnalyzer {
             new HiveFilterProjectTSTransposeRule(Filter.class, HiveRelFactories.HIVE_FILTER_FACTORY,
                     HiveProject.class, HiveRelFactories.HIVE_PROJECT_FACTORY, HiveTableScan.class));
 
-        // 8.2.  Introduce exchange operators below join/multijoin operators
+        // 9.2.  Introduce exchange operators below join/multijoin operators
         calciteOptimizedPlan = hepPlan(calciteOptimizedPlan, false, mdProvider.getMetadataProvider(), null,
                 HepMatchOrder.BOTTOM_UP, HiveInsertExchange4JoinRule.EXCHANGE_BELOW_JOIN,
                 HiveInsertExchange4JoinRule.EXCHANGE_BELOW_MULTIJOIN);
@@ -1251,7 +1271,7 @@ public class CalcitePlanner extends SemanticAnalyzer {
       basePlan = hepPlan(basePlan, true, mdProvider, null,
           new HiveFilterProjectTSTransposeRule(
               Filter.class, HiveRelFactories.HIVE_FILTER_FACTORY, HiveProject.class,
-              HiveRelFactories.HIVE_PROJECT_FACTORY, HiveTableScan.class),
+              HiveRelFactories.HIVE_PROJECT_FACTORY, TableScan.class),
           HiveProjectFilterPullUpConstantsRule.INSTANCE);
       perfLogger.PerfLogEnd(this.getClass().getName(), PerfLogger.OPTIMIZER,
         "Calcite: Prejoin ordering transformation, Rerun PPD");
@@ -1657,7 +1677,7 @@ public class CalcitePlanner extends SemanticAnalyzer {
 
     private RelNode genTableLogicalPlan(String tableAlias, QB qb) throws SemanticException {
       RowResolver rr = new RowResolver();
-      HiveTableScan tableRel = null;
+      RelNode tableRel = null;
 
       try {
 
@@ -1713,16 +1733,20 @@ public class CalcitePlanner extends SemanticAnalyzer {
           partitionColumns.add(colInfo);
         }
 
+        final TableType tableType = obtainTableType(tabMetaData);
+
         // 3.3 Add column info corresponding to virtual columns
         List<VirtualColumn> virtualCols = new ArrayList<VirtualColumn>();
-        Iterator<VirtualColumn> vcs = VirtualColumn.getRegistry(conf).iterator();
-        while (vcs.hasNext()) {
-          VirtualColumn vc = vcs.next();
-          colInfo = new ColumnInfo(vc.getName(), vc.getTypeInfo(), tableAlias, true,
-              vc.getIsHidden());
-          rr.put(tableAlias, vc.getName().toLowerCase(), colInfo);
-          cInfoLst.add(colInfo);
-          virtualCols.add(vc);
+        if (tableType == TableType.NATIVE) {
+          Iterator<VirtualColumn> vcs = VirtualColumn.getRegistry(conf).iterator();
+          while (vcs.hasNext()) {
+            VirtualColumn vc = vcs.next();
+            colInfo = new ColumnInfo(vc.getName(), vc.getTypeInfo(), tableAlias, true,
+                vc.getIsHidden());
+            rr.put(tableAlias, vc.getName().toLowerCase(), colInfo);
+            cInfoLst.add(colInfo);
+            virtualCols.add(vc);
+          }
         }
 
         // 3.4 Build row type from field <type, name>
@@ -1737,15 +1761,50 @@ public class CalcitePlanner extends SemanticAnalyzer {
           fullyQualifiedTabName = tabMetaData.getTableName();
         }
         RelOptHiveTable optTable = new RelOptHiveTable(relOptSchema, fullyQualifiedTabName,
-            rowType, tabMetaData, nonPartitionColumns, partitionColumns, virtualCols, conf,
-            partitionCache, noColsMissingStats);
-
-        // 5. Build Hive Table Scan Rel
-        tableRel = new HiveTableScan(cluster, cluster.traitSetOf(HiveRelNode.CONVENTION), optTable,
-            null == tableAlias ? tabMetaData.getTableName() : tableAlias,
-            getAliasId(tableAlias, qb), HiveConf.getBoolVar(conf,
-                HiveConf.ConfVars.HIVE_CBO_RETPATH_HIVEOP), qb.isInsideView()
-                || qb.getAliasInsideView().contains(tableAlias.toLowerCase()));
+                rowType, tabMetaData, nonPartitionColumns, partitionColumns, virtualCols, conf,
+                partitionCache, noColsMissingStats);
+
+        // 5. Build operator
+        if (tableType == TableType.DRUID) {
+          // Build Druid query
+          String address = HiveConf.getVar(conf,
+                  HiveConf.ConfVars.HIVE_DRUID_BROKER_DEFAULT_ADDRESS);
+          String dataSource = tabMetaData.getParameters().get(Constants.DRUID_DATA_SOURCE);
+          Set<String> metrics = new HashSet<>();
+          List<RelDataType> druidColTypes = new ArrayList<>();
+          List<String> druidColNames = new ArrayList<>();
+          for (RelDataTypeField field : rowType.getFieldList()) {
+            druidColTypes.add(field.getType());
+            druidColNames.add(field.getName());
+            if (field.getName().equals(DruidTable.DEFAULT_TIMESTAMP_COLUMN)) {
+              // timestamp
+              continue;
+            }
+            if (field.getType().getSqlTypeName() == SqlTypeName.VARCHAR) {
+              // dimension
+              continue;
+            }
+            metrics.add(field.getName());
+          }
+          List<Interval> intervals = Arrays.asList(DruidTable.DEFAULT_INTERVAL);
+
+          DruidTable druidTable = new DruidTable(new DruidSchema(address),
+                  dataSource, rowType, metrics, intervals, DruidTable.DEFAULT_TIMESTAMP_COLUMN);
+          final TableScan scan = new HiveTableScan(cluster, cluster.traitSetOf(HiveRelNode.CONVENTION),
+                  optTable, null == tableAlias ? tabMetaData.getTableName() : tableAlias,
+                  getAliasId(tableAlias, qb), HiveConf.getBoolVar(conf,
+                      HiveConf.ConfVars.HIVE_CBO_RETPATH_HIVEOP), qb.isInsideView()
+                      || qb.getAliasInsideView().contains(tableAlias.toLowerCase()));
+          tableRel = DruidQuery.create(cluster, cluster.traitSetOf(HiveRelNode.CONVENTION),
+              optTable, druidTable, ImmutableList.<RelNode>of(scan));
+        } else {
+          // Build Hive Table Scan Rel
+          tableRel = new HiveTableScan(cluster, cluster.traitSetOf(HiveRelNode.CONVENTION), optTable,
+              null == tableAlias ? tabMetaData.getTableName() : tableAlias,
+              getAliasId(tableAlias, qb), HiveConf.getBoolVar(conf,
+                  HiveConf.ConfVars.HIVE_CBO_RETPATH_HIVEOP), qb.isInsideView()
+                  || qb.getAliasInsideView().contains(tableAlias.toLowerCase()));
+        }
 
         // 6. Add Schema(RR) to RelNode-Schema map
         ImmutableMap<String, Integer> hiveToCalciteColMap = buildHiveToCalciteColumnMap(rr,
@@ -1763,6 +1822,15 @@ public class CalcitePlanner extends SemanticAnalyzer {
       return tableRel;
     }
 
+    private TableType obtainTableType(Table tabMetaData) {
+      if (tabMetaData.getStorageHandler() != null &&
+              tabMetaData.getStorageHandler().toString().equals(
+                      Constants.DRUID_HIVE_STORAGE_HANDLER_ID)) {
+        return TableType.DRUID;
+      }
+      return TableType.NATIVE;
+    }
+
     private RelNode genFilterRelNode(ASTNode filterExpr, RelNode srcRel,
             boolean useCaching) throws SemanticException {
       ExprNodeDesc filterCondn = genExprNodeDesc(filterExpr, relToHiveRR.get(srcRel), useCaching);
@@ -3366,4 +3434,9 @@ public class CalcitePlanner extends SemanticAnalyzer {
     }
   }
 
+  private enum TableType {
+    DRUID,
+    NATIVE
+  }
+
 }

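The new branch in genTableLogicalPlan derives the Druid schema directly from the Hive row type: the Druid timestamp column is the time dimension, VARCHAR columns become dimensions, and every remaining column is treated as a metric. A minimal sketch of that classification follows, using illustrative column names and a hypothetical Field record in place of RelDataTypeField ("__time" stands in for DruidTable.DEFAULT_TIMESTAMP_COLUMN).

import java.util.HashSet;
import java.util.List;
import java.util.Set;

// Hypothetical, simplified version of the metric-detection loop above.
public class DruidColumnClassifierSketch {

  record Field(String name, String sqlTypeName) {}

  static Set<String> metrics(List<Field> rowType, String timestampColumn) {
    Set<String> metrics = new HashSet<>();
    for (Field field : rowType) {
      if (field.name().equals(timestampColumn)) {
        continue; // timestamp column
      }
      if (field.sqlTypeName().equals("VARCHAR")) {
        continue; // dimension
      }
      metrics.add(field.name()); // everything else is a metric
    }
    return metrics;
  }

  public static void main(String[] args) {
    // illustrative columns only
    List<Field> rowType = List.of(
        new Field("__time", "TIMESTAMP"),
        new Field("robot", "VARCHAR"),
        new Field("added", "BIGINT"));
    System.out.println(metrics(rowType, "__time")); // [added]
  }
}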
http://git-wip-us.apache.org/repos/asf/hive/blob/58d1befa/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
index ace0e9c..943d9d7 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
@@ -56,7 +56,6 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.hive.common.BlobStorageUtils;
-import org.apache.hadoop.hive.ql.plan.AlterTableDesc;
 import org.apache.hadoop.hive.common.FileUtils;
 import org.apache.hadoop.hive.common.ObjectPair;
 import org.apache.hadoop.hive.common.StatsSetupConst;
@@ -156,6 +155,7 @@ import org.apache.hadoop.hive.ql.parse.WindowingSpec.WindowFrameSpec;
 import org.apache.hadoop.hive.ql.parse.WindowingSpec.WindowFunctionSpec;
 import org.apache.hadoop.hive.ql.parse.WindowingSpec.WindowSpec;
 import org.apache.hadoop.hive.ql.plan.AggregationDesc;
+import org.apache.hadoop.hive.ql.plan.AlterTableDesc;
 import org.apache.hadoop.hive.ql.plan.AlterTableDesc.AlterTableTypes;
 import org.apache.hadoop.hive.ql.plan.CreateTableDesc;
 import org.apache.hadoop.hive.ql.plan.CreateTableLikeDesc;
@@ -9787,11 +9787,24 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
     // is the table already present
     TableScanOperator top = topOps.get(alias_id);
 
+    // Obtain table props in query
+    Map<String, String> properties = qb.getTabPropsForAlias(alias);
+
     if (top == null) {
       // Determine row schema for TSOP.
       // Include column names from SerDe, the partition and virtual columns.
       rwsch = new RowResolver();
       try {
+        // Including parameters passed in the query
+        if (properties != null) {
+          for (Entry<String, String> prop : properties.entrySet()) {
+            if (tab.getSerdeParam(prop.getKey()) != null) {
+              LOG.warn("SerDe property in input query overrides stored SerDe property");
+            }
+            tab.setSerdeParam(prop.getKey(), prop.getValue());
+          }
+        }
+        // Obtain inspector for schema
         StructObjectInspector rowObjectInspector = (StructObjectInspector) tab
             .getDeserializer().getObjectInspector();
         List<? extends StructField> fields = rowObjectInspector
@@ -9852,10 +9865,9 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
       // Add a mapping from the table scan operator to Table
       topToTable.put(top, tab);
 
-      Map<String, String> props = qb.getTabPropsForAlias(alias);
-      if (props != null) {
-        topToTableProps.put(top, props);
-        tsDesc.setOpProps(props);
+      if (properties != null) {
+        topToTableProps.put(top, properties);
+        tsDesc.setOpProps(properties);
       }
     } else {
       rwsch = opParseCtx.get(top).getRowResolver();

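The SemanticAnalyzer change folds properties attached to the table reference in the query into the table's SerDe parameters before the ObjectInspector is obtained, logging a warning whenever a stored property is overridden. A minimal stand-alone sketch of that override rule, with a hypothetical property name:

import java.util.HashMap;
import java.util.Map;

// Hypothetical illustration of the override behaviour introduced above.
public class QueryPropertyOverrideSketch {

  public static void main(String[] args) {
    Map<String, String> storedSerdeParams = new HashMap<>();
    storedSerdeParams.put("some.serde.property", "stored-value");

    // properties supplied with the table reference in the query
    Map<String, String> queryProperties = Map.of("some.serde.property", "query-value");

    for (Map.Entry<String, String> prop : queryProperties.entrySet()) {
      if (storedSerdeParams.get(prop.getKey()) != null) {
        System.out.println("WARN: SerDe property in input query overrides stored SerDe property");
      }
      storedSerdeParams.put(prop.getKey(), prop.getValue());
    }
    System.out.println(storedSerdeParams); // {some.serde.property=query-value}
  }
}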
http://git-wip-us.apache.org/repos/asf/hive/blob/58d1befa/ql/src/java/org/apache/hadoop/hive/ql/plan/CreateTableDesc.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/CreateTableDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/CreateTableDesc.java
index bf808c3..eafba21 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/plan/CreateTableDesc.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/CreateTableDesc.java
@@ -24,14 +24,15 @@ import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
 
+import org.apache.commons.lang3.StringUtils;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hive.common.StatsSetupConst;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.metastore.TableType;
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
-import org.apache.hadoop.hive.metastore.api.SQLPrimaryKey;
-import org.apache.hadoop.hive.metastore.api.SQLForeignKey;
 import org.apache.hadoop.hive.metastore.api.Order;
+import org.apache.hadoop.hive.metastore.api.SQLForeignKey;
+import org.apache.hadoop.hive.metastore.api.SQLPrimaryKey;
 import org.apache.hadoop.hive.ql.ErrorMsg;
 import org.apache.hadoop.hive.ql.exec.DDLTask;
 import org.apache.hadoop.hive.ql.exec.Utilities;
@@ -476,7 +477,8 @@ public class CreateTableDesc extends DDLDesc implements Serializable {
 
     if ((this.getCols() == null) || (this.getCols().size() == 0)) {
       // for now make sure that serde exists
-      if (Table.hasMetastoreBasedSchema(conf, getSerName())) {
+      if (Table.hasMetastoreBasedSchema(conf, serName) &&
+              StringUtils.isEmpty(getStorageHandler())) {
         throw new SemanticException(ErrorMsg.INVALID_TBL_DDL_SERDE.getMsg());
       }
       return;

http://git-wip-us.apache.org/repos/asf/hive/blob/58d1befa/ql/src/java/org/apache/hadoop/hive/ql/plan/TableScanDesc.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/TableScanDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/TableScanDesc.java
index 47c65bb..ebe613e 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/plan/TableScanDesc.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/TableScanDesc.java
@@ -389,4 +389,11 @@ public class TableScanDesc extends AbstractOperatorDesc {
     }
     return rtn;
   }
+
+  @Override
+  @Explain(displayName = "properties", explainLevels = { Level.DEFAULT, Level.USER, Level.EXTENDED })
+  public Map<String, String> getOpProps() {
+    return opProps;
+  }
+
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/58d1befa/ql/src/java/org/apache/hadoop/hive/ql/udf/UDFDateFloor.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/udf/UDFDateFloor.java b/ql/src/java/org/apache/hadoop/hive/ql/udf/UDFDateFloor.java
new file mode 100644
index 0000000..08ed9fd
--- /dev/null
+++ b/ql/src/java/org/apache/hadoop/hive/ql/udf/UDFDateFloor.java
@@ -0,0 +1,506 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.ql.udf;
+
+import java.util.Iterator;
+import java.util.Map;
+import java.util.NoSuchElementException;
+
+import org.apache.hadoop.hive.ql.exec.UDF;
+import org.apache.hadoop.hive.serde2.io.TimestampWritable;
+import org.joda.time.Chronology;
+import org.joda.time.DateTime;
+import org.joda.time.DateTimeZone;
+import org.joda.time.Period;
+import org.joda.time.ReadableDuration;
+import org.joda.time.chrono.ISOChronology;
+
+import com.google.common.collect.ImmutableMap;
+
+/**
+ * UDFDateFloor.
+ *
+ * Abstract class that converts a timestamp to a timestamp with a given granularity.
+ */
+public abstract class UDFDateFloor extends UDF {
+
+  private final QueryGranularity granularity;
+
+  private final TimestampWritable result;
+
+  public UDFDateFloor(String granularity) {
+    this.granularity = QueryGranularity.fromString(granularity);
+    this.result = new TimestampWritable();
+  }
+
+  public TimestampWritable evaluate(TimestampWritable t) {
+    if (t == null) {
+      return null;
+    }
+    long newTimestamp = granularity.truncate(t.getTimestamp().getTime());
+    result.setTime(newTimestamp);
+    return result;
+  }
+
+  /*
+   * The code below, which computes the result for the granularity functions, has been adapted from Druid.
+   */
+
+  private static final Map<String, PeriodGranularity> CALENDRIC_GRANULARITIES = ImmutableMap.of(
+          "YEAR", new PeriodGranularity(new Period("P1Y"), null, null),
+          "MONTH", new PeriodGranularity(new Period("P1M"), null, null),
+          "QUARTER", new PeriodGranularity(new Period("P3M"), null, null),
+          "WEEK", new PeriodGranularity(new Period("P1W"), null, null));
+
+  private static abstract class QueryGranularity {
+
+    public abstract long next(long offset);
+
+    public abstract long truncate(long offset);
+
+    public abstract DateTime toDateTime(long offset);
+
+    public abstract Iterable<Long> iterable(final long start, final long end);
+
+    public static QueryGranularity fromString(String str) {
+      String name = str.toUpperCase();
+      if (CALENDRIC_GRANULARITIES.containsKey(name)) {
+        return CALENDRIC_GRANULARITIES.get(name);
+      }
+      return new DurationGranularity(convertValue(str), 0);
+    }
+
+    private static enum MillisIn {
+      SECOND(1000), MINUTE(60 * 1000), FIFTEEN_MINUTE(15 * 60 * 1000), THIRTY_MINUTE(
+              30 * 60 * 1000), HOUR(3600 * 1000), DAY(24 * 3600 * 1000);
+
+      private final long millis;
+
+      MillisIn(final long millis) {
+        this.millis = millis;
+      }
+    }
+
+    private static long convertValue(Object o) {
+      if (o instanceof String) {
+        return MillisIn.valueOf(((String) o).toUpperCase()).millis;
+      } else if (o instanceof ReadableDuration) {
+        return ((ReadableDuration) o).getMillis();
+      } else if (o instanceof Number) {
+        return ((Number) o).longValue();
+      }
+      throw new RuntimeException("Granularity not recognized");
+    }
+  }
+
+  private static abstract class BaseQueryGranularity extends QueryGranularity {
+    public abstract long next(long offset);
+
+    public abstract long truncate(long offset);
+
+    public DateTime toDateTime(long offset) {
+      return new DateTime(offset, DateTimeZone.UTC);
+    }
+
+    public Iterable<Long> iterable(final long start, final long end) {
+      return new Iterable<Long>() {
+        @Override
+        public Iterator<Long> iterator() {
+          return new Iterator<Long>() {
+            long curr = truncate(start);
+
+            long next = BaseQueryGranularity.this.next(curr);
+
+            @Override
+            public boolean hasNext() {
+              return curr < end;
+            }
+
+            @Override
+            public Long next() {
+              if (!hasNext()) {
+                throw new NoSuchElementException();
+              }
+
+              long retVal = curr;
+
+              curr = next;
+              next = BaseQueryGranularity.this.next(curr);
+
+              return retVal;
+            }
+
+            @Override
+            public void remove() {
+              throw new UnsupportedOperationException();
+            }
+          };
+        }
+      };
+    }
+  }
+
+  private static class PeriodGranularity extends BaseQueryGranularity {
+    private final Period period;
+
+    private final Chronology chronology;
+
+    private final long origin;
+
+    private final boolean hasOrigin;
+
+    private final boolean isCompound;
+
+    public PeriodGranularity(Period period, DateTime origin, DateTimeZone tz) {
+      this.period = period;
+      this.chronology = tz == null ? ISOChronology.getInstanceUTC() : ISOChronology.getInstance(tz);
+      if (origin == null) {
+        // default to origin in given time zone when aligning multi-period granularities
+        this.origin = new DateTime(0, DateTimeZone.UTC).withZoneRetainFields(chronology.getZone())
+                .getMillis();
+        this.hasOrigin = false;
+      } else {
+        this.origin = origin.getMillis();
+        this.hasOrigin = true;
+      }
+      this.isCompound = isCompoundPeriod(period);
+    }
+
+    @Override
+    public DateTime toDateTime(long t) {
+      return new DateTime(t, chronology.getZone());
+    }
+
+    @Override
+    public long next(long t) {
+      return chronology.add(period, t, 1);
+    }
+
+    @Override
+    public long truncate(long t) {
+      if (isCompound) {
+        try {
+          return truncateMillisPeriod(t);
+        } catch (UnsupportedOperationException e) {
+          return truncateCompoundPeriod(t);
+        }
+      }
+
+      final int years = period.getYears();
+      if (years > 0) {
+        if (years > 1 || hasOrigin) {
+          int y = chronology.years().getDifference(t, origin);
+          y -= y % years;
+          long tt = chronology.years().add(origin, y);
+          // always round down to the previous period (for timestamps prior to origin)
+          if (t < tt)
+            t = chronology.years().add(tt, -years);
+          else
+            t = tt;
+          return t;
+        } else {
+          return chronology.year().roundFloor(t);
+        }
+      }
+
+      final int months = period.getMonths();
+      if (months > 0) {
+        if (months > 1 || hasOrigin) {
+          int m = chronology.months().getDifference(t, origin);
+          m -= m % months;
+          long tt = chronology.months().add(origin, m);
+          // always round down to the previous period (for timestamps prior to origin)
+          if (t < tt)
+            t = chronology.months().add(tt, -months);
+          else
+            t = tt;
+          return t;
+        } else {
+          return chronology.monthOfYear().roundFloor(t);
+        }
+      }
+
+      final int weeks = period.getWeeks();
+      if (weeks > 0) {
+        if (weeks > 1 || hasOrigin) {
+          // align on multiples from origin
+          int w = chronology.weeks().getDifference(t, origin);
+          w -= w % weeks;
+          long tt = chronology.weeks().add(origin, w);
+          // always round down to the previous period (for timestamps prior to origin)
+          if (t < tt)
+            t = chronology.weeks().add(tt, -weeks);
+          else
+            t = tt;
+          return t;
+        } else {
+          t = chronology.dayOfWeek().roundFloor(t);
+          // default to Monday as beginning of the week
+          return chronology.dayOfWeek().set(t, 1);
+        }
+      }
+
+      final int days = period.getDays();
+      if (days > 0) {
+        if (days > 1 || hasOrigin) {
+          // align on multiples from origin
+          int d = chronology.days().getDifference(t, origin);
+          d -= d % days;
+          long tt = chronology.days().add(origin, d);
+          // always round down to the previous period (for timestamps prior to origin)
+          if (t < tt)
+            t = chronology.days().add(tt, -days);
+          else
+            t = tt;
+          return t;
+        } else {
+          t = chronology.hourOfDay().roundFloor(t);
+          return chronology.hourOfDay().set(t, 0);
+        }
+      }
+
+      final int hours = period.getHours();
+      if (hours > 0) {
+        if (hours > 1 || hasOrigin) {
+          // align on multiples from origin
+          long h = chronology.hours().getDifferenceAsLong(t, origin);
+          h -= h % hours;
+          long tt = chronology.hours().add(origin, h);
+          // always round down to the previous period (for timestamps prior to origin)
+          if (t < tt)
+            t = chronology.hours().add(tt, -hours);
+          else
+            t = tt;
+          return t;
+        } else {
+          t = chronology.minuteOfHour().roundFloor(t);
+          return chronology.minuteOfHour().set(t, 0);
+        }
+      }
+
+      final int minutes = period.getMinutes();
+      if (minutes > 0) {
+        // align on multiples from origin
+        if (minutes > 1 || hasOrigin) {
+          long m = chronology.minutes().getDifferenceAsLong(t, origin);
+          m -= m % minutes;
+          long tt = chronology.minutes().add(origin, m);
+          // always round down to the previous period (for timestamps prior to origin)
+          if (t < tt)
+            t = chronology.minutes().add(tt, -minutes);
+          else
+            t = tt;
+          return t;
+        } else {
+          t = chronology.secondOfMinute().roundFloor(t);
+          return chronology.secondOfMinute().set(t, 0);
+        }
+      }
+
+      final int seconds = period.getSeconds();
+      if (seconds > 0) {
+        // align on multiples from origin
+        if (seconds > 1 || hasOrigin) {
+          long s = chronology.seconds().getDifferenceAsLong(t, origin);
+          s -= s % seconds;
+          long tt = chronology.seconds().add(origin, s);
+          // always round down to the previous period (for timestamps prior to origin)
+          if (t < tt)
+            t = chronology.seconds().add(tt, -seconds);
+          else
+            t = tt;
+          return t;
+        } else {
+          return chronology.millisOfSecond().set(t, 0);
+        }
+      }
+
+      final int millis = period.getMillis();
+      if (millis > 0) {
+        if (millis > 1) {
+          long ms = chronology.millis().getDifferenceAsLong(t, origin);
+          ms -= ms % millis;
+          long tt = chronology.millis().add(origin, ms);
+          // always round down to the previous period (for timestamps prior to origin)
+          if (t < tt)
+            t = chronology.millis().add(tt, -millis);
+          else
+            t = tt;
+          return t;
+        } else {
+          return t;
+        }
+      }
+
+      return t;
+    }
+
+    private static boolean isCompoundPeriod(Period period) {
+      int[] values = period.getValues();
+      boolean single = false;
+      for (int v : values) {
+        if (v > 0) {
+          if (single)
+            return true;
+          single = true;
+        }
+      }
+      return false;
+    }
+
+    private long truncateCompoundPeriod(long t) {
+      long current;
+      if (t >= origin) {
+        long next = origin;
+        do {
+          current = next;
+          next = chronology.add(period, current, 1);
+        } while (t >= next);
+      } else {
+        current = origin;
+        do {
+          current = chronology.add(period, current, -1);
+        } while (t < current);
+      }
+      return current;
+    }
+
+    private long truncateMillisPeriod(final long t) {
+      // toStandardDuration assumes days are always 24h, and hours are always 60 minutes,
+      // which may not always be the case, e.g if there are daylight saving changes.
+      if (chronology.days().isPrecise() && chronology.hours().isPrecise()) {
+        final long millis = period.toStandardDuration().getMillis();
+        long offset = t % millis - origin % millis;
+        if (offset < 0) {
+          offset += millis;
+        }
+        return t - offset;
+      } else {
+        throw new UnsupportedOperationException(
+                "Period cannot be converted to milliseconds as some fields mays vary in length with chronology "
+                        + chronology.toString());
+      }
+    }
+
+    @Override
+    public boolean equals(Object o) {
+      if (this == o) {
+        return true;
+      }
+      if (o == null || getClass() != o.getClass()) {
+        return false;
+      }
+
+      PeriodGranularity that = (PeriodGranularity) o;
+
+      if (hasOrigin != that.hasOrigin) {
+        return false;
+      }
+      if (origin != that.origin) {
+        return false;
+      }
+      if (!chronology.equals(that.chronology)) {
+        return false;
+      }
+      if (!period.equals(that.period)) {
+        return false;
+      }
+
+      return true;
+    }
+
+    @Override
+    public int hashCode() {
+      int result = period.hashCode();
+      result = 31 * result + chronology.hashCode();
+      result = 31 * result + (int) (origin ^ (origin >>> 32));
+      result = 31 * result + (hasOrigin ? 1 : 0);
+      return result;
+    }
+
+    @Override
+    public String toString() {
+      return "PeriodGranularity{" + "period=" + period + ", timeZone=" + chronology.getZone()
+              + ", origin=" + (hasOrigin ? origin : "null") + '}';
+    }
+  }
+
+  private static class DurationGranularity extends BaseQueryGranularity {
+    private final long length;
+
+    private final long origin;
+
+    public DurationGranularity(long millis, long origin) {
+      this.length = millis;
+      this.origin = origin % length;
+    }
+
+    @Override
+    public long next(long t) {
+      return t + getDurationMillis();
+    }
+
+    @Override
+    public long truncate(final long t) {
+      final long duration = getDurationMillis();
+      long offset = t % duration - origin % duration;
+      if (offset < 0) {
+        offset += duration;
+      }
+      return t - offset;
+    }
+
+    public long getDurationMillis() {
+      return length;
+    }
+
+    @Override
+    public boolean equals(Object o) {
+      if (this == o) {
+        return true;
+      }
+      if (o == null || getClass() != o.getClass()) {
+        return false;
+      }
+
+      DurationGranularity that = (DurationGranularity) o;
+
+      if (length != that.length) {
+        return false;
+      }
+      if (origin != that.origin) {
+        return false;
+      }
+
+      return true;
+    }
+
+    @Override
+    public int hashCode() {
+      int result = (int) (length ^ (length >>> 32));
+      result = 31 * result + (int) (origin ^ (origin >>> 32));
+      return result;
+    }
+
+    @Override
+    public String toString() {
+      return "DurationGranularity{" + "length=" + length + ", origin=" + origin + '}';
+    }
+  }
+
+}

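A minimal usage sketch of the new UDF family (the concrete subclasses follow below); the input and expected output are the same millisecond values asserted in TestUDFDateFormatGranularity at the end of this patch.

import java.sql.Timestamp;

import org.apache.hadoop.hive.ql.udf.UDFDateFloorYear;
import org.apache.hadoop.hive.serde2.io.TimestampWritable;

// Floors 1985-08-30 02:47:02 UTC (494218022082L) to year granularity,
// i.e. 1985-01-01 00:00:00 UTC (473385600000L).
public class UDFDateFloorUsageSketch {
  public static void main(String[] args) {
    TimestampWritable input = new TimestampWritable(new Timestamp(494218022082L));
    TimestampWritable floored = new UDFDateFloorYear().evaluate(input);
    System.out.println(floored.getTimestamp().getTime()); // 473385600000
  }
}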
http://git-wip-us.apache.org/repos/asf/hive/blob/58d1befa/ql/src/java/org/apache/hadoop/hive/ql/udf/UDFDateFloorDay.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/udf/UDFDateFloorDay.java b/ql/src/java/org/apache/hadoop/hive/ql/udf/UDFDateFloorDay.java
new file mode 100644
index 0000000..e205797
--- /dev/null
+++ b/ql/src/java/org/apache/hadoop/hive/ql/udf/UDFDateFloorDay.java
@@ -0,0 +1,39 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.ql.udf;
+
+import org.apache.hadoop.hive.ql.exec.Description;
+
+/**
+ * UDFDateFloorDay.
+ *
+ * Converts a timestamp to a timestamp with day granularity.
+ */
+@Description(name = "floor_day",
+    value = "_FUNC_(param) - Returns the timestamp at a day granularity",
+    extended = "param needs to be a timestamp value\n"
+    + "Example:\n "
+    + "  > SELECT _FUNC_(CAST('yyyy-MM-dd HH:mm:ss' AS TIMESTAMP)) FROM src;\n"
+    + "  yyyy-MM-dd 00:00:00")
+public class UDFDateFloorDay extends UDFDateFloor {
+
+  public UDFDateFloorDay() {
+    super("DAY");
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/hive/blob/58d1befa/ql/src/java/org/apache/hadoop/hive/ql/udf/UDFDateFloorHour.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/udf/UDFDateFloorHour.java b/ql/src/java/org/apache/hadoop/hive/ql/udf/UDFDateFloorHour.java
new file mode 100644
index 0000000..84fd394
--- /dev/null
+++ b/ql/src/java/org/apache/hadoop/hive/ql/udf/UDFDateFloorHour.java
@@ -0,0 +1,39 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.ql.udf;
+
+import org.apache.hadoop.hive.ql.exec.Description;
+
+/**
+ * UDFDateFloorHour.
+ *
+ * Converts a timestamp to a timestamp with hour granularity.
+ */
+@Description(name = "floor_hour",
+    value = "_FUNC_(param) - Returns the timestamp at a hour granularity",
+    extended = "param needs to be a timestamp value\n"
+    + "Example:\n "
+    + "  > SELECT _FUNC_(CAST('yyyy-MM-dd HH:mm:ss' AS TIMESTAMP)) FROM src;\n"
+    + "  yyyy-MM-dd HH:00:00")
+public class UDFDateFloorHour extends UDFDateFloor {
+
+  public UDFDateFloorHour() {
+    super("HOUR");
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/hive/blob/58d1befa/ql/src/java/org/apache/hadoop/hive/ql/udf/UDFDateFloorMinute.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/udf/UDFDateFloorMinute.java b/ql/src/java/org/apache/hadoop/hive/ql/udf/UDFDateFloorMinute.java
new file mode 100644
index 0000000..45b8f7e
--- /dev/null
+++ b/ql/src/java/org/apache/hadoop/hive/ql/udf/UDFDateFloorMinute.java
@@ -0,0 +1,39 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.ql.udf;
+
+import org.apache.hadoop.hive.ql.exec.Description;
+
+/**
+ * UDFDateFloorMinute.
+ *
+ * Converts a timestamp to a timestamp with minute granularity.
+ */
+@Description(name = "floor_minute",
+    value = "_FUNC_(param) - Returns the timestamp at a minute granularity",
+    extended = "param needs to be a timestamp value\n"
+    + "Example:\n "
+    + "  > SELECT _FUNC_(CAST('yyyy-MM-dd HH:mm:ss' AS TIMESTAMP)) FROM src;\n"
+    + "  yyyy-MM-dd HH:mm:00")
+public class UDFDateFloorMinute extends UDFDateFloor {
+
+  public UDFDateFloorMinute() {
+    super("MINUTE");
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/hive/blob/58d1befa/ql/src/java/org/apache/hadoop/hive/ql/udf/UDFDateFloorMonth.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/udf/UDFDateFloorMonth.java b/ql/src/java/org/apache/hadoop/hive/ql/udf/UDFDateFloorMonth.java
new file mode 100644
index 0000000..e3c70a1
--- /dev/null
+++ b/ql/src/java/org/apache/hadoop/hive/ql/udf/UDFDateFloorMonth.java
@@ -0,0 +1,39 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.ql.udf;
+
+import org.apache.hadoop.hive.ql.exec.Description;
+
+/**
+ * UDFDateFloorMonth.
+ *
+ * Converts a timestamp to a timestamp with month granularity.
+ */
+@Description(name = "floor_month",
+    value = "_FUNC_(param) - Returns the timestamp at a month granularity",
+    extended = "param needs to be a timestamp value\n"
+    + "Example:\n "
+    + "  > SELECT _FUNC_(CAST('yyyy-MM-dd HH:mm:ss' AS TIMESTAMP)) FROM src;\n"
+    + "  yyyy-MM-01 00:00:00")
+public class UDFDateFloorMonth extends UDFDateFloor {
+
+  public UDFDateFloorMonth() {
+    super("MONTH");
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/hive/blob/58d1befa/ql/src/java/org/apache/hadoop/hive/ql/udf/UDFDateFloorQuarter.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/udf/UDFDateFloorQuarter.java b/ql/src/java/org/apache/hadoop/hive/ql/udf/UDFDateFloorQuarter.java
new file mode 100644
index 0000000..c017238
--- /dev/null
+++ b/ql/src/java/org/apache/hadoop/hive/ql/udf/UDFDateFloorQuarter.java
@@ -0,0 +1,39 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.ql.udf;
+
+import org.apache.hadoop.hive.ql.exec.Description;
+
+/**
+ * UDFDateFloorQuarter.
+ *
+ * Converts a timestamp to a timestamp with quarter granularity.
+ */
+@Description(name = "floor_quarter",
+    value = "_FUNC_(param) - Returns the timestamp at a quarter granularity",
+    extended = "param needs to be a timestamp value\n"
+    + "Example:\n "
+    + "  > SELECT _FUNC_(CAST('yyyy-MM-dd HH:mm:ss' AS TIMESTAMP)) FROM src;\n"
+    + "  yyyy-xx-01 00:00:00")
+public class UDFDateFloorQuarter extends UDFDateFloor {
+
+  public UDFDateFloorQuarter() {
+    super("QUARTER");
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/hive/blob/58d1befa/ql/src/java/org/apache/hadoop/hive/ql/udf/UDFDateFloorSecond.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/udf/UDFDateFloorSecond.java b/ql/src/java/org/apache/hadoop/hive/ql/udf/UDFDateFloorSecond.java
new file mode 100644
index 0000000..5e10026
--- /dev/null
+++ b/ql/src/java/org/apache/hadoop/hive/ql/udf/UDFDateFloorSecond.java
@@ -0,0 +1,39 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.ql.udf;
+
+import org.apache.hadoop.hive.ql.exec.Description;
+
+/**
+ * UDFDateFloorSecond.
+ *
+ * Converts a timestamp to a timestamp with second granularity.
+ */
+@Description(name = "floor_second",
+    value = "_FUNC_(param) - Returns the timestamp at a second granularity",
+    extended = "param needs to be a timestamp value\n"
+    + "Example:\n "
+    + "  > SELECT _FUNC_(CAST('yyyy-MM-dd HH:mm:ss' AS TIMESTAMP)) FROM src;\n"
+    + "  yyyy-MM-dd HH:mm:ss")
+public class UDFDateFloorSecond extends UDFDateFloor {
+
+  public UDFDateFloorSecond() {
+    super("SECOND");
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/hive/blob/58d1befa/ql/src/java/org/apache/hadoop/hive/ql/udf/UDFDateFloorWeek.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/udf/UDFDateFloorWeek.java b/ql/src/java/org/apache/hadoop/hive/ql/udf/UDFDateFloorWeek.java
new file mode 100644
index 0000000..185a84a
--- /dev/null
+++ b/ql/src/java/org/apache/hadoop/hive/ql/udf/UDFDateFloorWeek.java
@@ -0,0 +1,39 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.ql.udf;
+
+import org.apache.hadoop.hive.ql.exec.Description;
+
+/**
+ * UDFDateFloorWeek.
+ *
+ * Converts a timestamp to a timestamp with week granularity.
+ */
+@Description(name = "floor_week",
+    value = "_FUNC_(param) - Returns the timestamp at a week granularity",
+    extended = "param needs to be a timestamp value\n"
+    + "Example:\n "
+    + "  > SELECT _FUNC_(CAST('yyyy-MM-dd HH:mm:ss' AS TIMESTAMP)) FROM src;\n"
+    + "  yyyy-MM-xx 00:00:00")
+public class UDFDateFloorWeek extends UDFDateFloor {
+
+  public UDFDateFloorWeek() {
+    super("WEEK");
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/hive/blob/58d1befa/ql/src/java/org/apache/hadoop/hive/ql/udf/UDFDateFloorYear.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/udf/UDFDateFloorYear.java b/ql/src/java/org/apache/hadoop/hive/ql/udf/UDFDateFloorYear.java
new file mode 100644
index 0000000..e68decf
--- /dev/null
+++ b/ql/src/java/org/apache/hadoop/hive/ql/udf/UDFDateFloorYear.java
@@ -0,0 +1,39 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.ql.udf;
+
+import org.apache.hadoop.hive.ql.exec.Description;
+
+/**
+ * UDFDateFloorYear.
+ *
+ * Converts a timestamp to a timestamp with year granularity.
+ */
+@Description(name = "floor_year",
+    value = "_FUNC_(param) - Returns the timestamp at a year granularity",
+    extended = "param needs to be a timestamp value\n"
+    + "Example:\n "
+    + "  > SELECT _FUNC_(CAST('yyyy-MM-dd HH:mm:ss' AS TIMESTAMP)) FROM src;\n"
+    + "  yyyy-01-01 00:00:00")
+public class UDFDateFloorYear extends UDFDateFloor {
+
+  public UDFDateFloorYear() {
+    super("YEAR");
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/hive/blob/58d1befa/ql/src/test/org/apache/hadoop/hive/ql/optimizer/calcite/TestCBORuleFiredOnlyOnce.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/optimizer/calcite/TestCBORuleFiredOnlyOnce.java b/ql/src/test/org/apache/hadoop/hive/ql/optimizer/calcite/TestCBORuleFiredOnlyOnce.java
index 44e157b..2830f1f 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/optimizer/calcite/TestCBORuleFiredOnlyOnce.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/optimizer/calcite/TestCBORuleFiredOnlyOnce.java
@@ -61,7 +61,7 @@ public class TestCBORuleFiredOnlyOnce {
 
     // Create rules registry to not trigger a rule more than once
     HiveRulesRegistry registry = new HiveRulesRegistry();
-    HivePlannerContext context = new HivePlannerContext(null, registry);
+    HivePlannerContext context = new HivePlannerContext(null, null, registry);
     HepPlanner planner = new HepPlanner(programBuilder.build(), context);
 
     // Cluster

http://git-wip-us.apache.org/repos/asf/hive/blob/58d1befa/ql/src/test/org/apache/hadoop/hive/ql/udf/TestUDFDateFormatGranularity.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/udf/TestUDFDateFormatGranularity.java b/ql/src/test/org/apache/hadoop/hive/ql/udf/TestUDFDateFormatGranularity.java
new file mode 100644
index 0000000..f871de2
--- /dev/null
+++ b/ql/src/test/org/apache/hadoop/hive/ql/udf/TestUDFDateFormatGranularity.java
@@ -0,0 +1,85 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.ql.udf;
+
+import java.sql.Timestamp;
+
+import org.apache.hadoop.hive.serde2.io.TimestampWritable;
+import org.junit.Test;
+
+import junit.framework.TestCase;
+
+public class TestUDFDateFormatGranularity extends TestCase {
+  
+  @Test
+  public void testTimestampToTimestampWithGranularity() throws Exception {
+    // Running example
+    // Friday 30th August 1985 02:47:02 AM
+    final TimestampWritable t = new TimestampWritable(new Timestamp(494218022082L));
+    UDFDateFloor g;
+
+    // Year granularity
+    // Tuesday 1st January 1985 12:00:00 AM
+    g = new UDFDateFloorYear();
+    TimestampWritable i1 = g.evaluate(t);
+    assertEquals(473385600000L, i1.getTimestamp().getTime());
+    
+    // Quarter granularity
+    // Monday 1st July 1985 12:00:00 AM
+    g = new UDFDateFloorQuarter();
+    TimestampWritable i2 = g.evaluate(t);
+    assertEquals(489024000000L, i2.getTimestamp().getTime());
+
+    // Month granularity
+    // Thursday 1st August 1985 12:00:00 AM
+    g = new UDFDateFloorMonth();
+    TimestampWritable i3 = g.evaluate(t);
+    assertEquals(491702400000L, i3.getTimestamp().getTime());
+
+    // Week granularity
+    // Monday 26th August 1985 12:00:00 AM
+    g = new UDFDateFloorWeek();
+    TimestampWritable i4 = g.evaluate(t);
+    assertEquals(493862400000L, i4.getTimestamp().getTime());
+
+    // Day granularity
+    // Friday 30th August 1985 12:00:00 AM
+    g = new UDFDateFloorDay();
+    TimestampWritable i5 = g.evaluate(t);
+    assertEquals(494208000000L, i5.getTimestamp().getTime());
+
+    // Hour granularity
+    // Friday 30th August 1985 02:00:00 AM
+    g = new UDFDateFloorHour();
+    TimestampWritable i6 = g.evaluate(t);
+    assertEquals(494215200000L, i6.getTimestamp().getTime());
+
+    // Minute granularity
+    // Friday 30th August 1985 02:47:00 AM
+    g = new UDFDateFloorMinute();
+    TimestampWritable i7 = g.evaluate(t);
+    assertEquals(494218020000L, i7.getTimestamp().getTime());
+
+    // Second granularity
+    // Friday 30th August 1985 02:47:02 AM
+    g = new UDFDateFloorSecond();
+    TimestampWritable i8 = g.evaluate(t);
+    assertEquals(494218022000L, i8.getTimestamp().getTime());
+  }
+
+}
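
The expected epoch values asserted above are consistent with UTC-based flooring. A small self-contained check (not part of the patch; assumes UTC semantics and Java 8+ for java.time) that reproduces a few of the constants:

  import java.time.ZoneOffset;
  import java.time.ZonedDateTime;

  public class GranularityExpectationCheck {
    public static void main(String[] args) {
      // Input used by the test: 1985-08-30T02:47:02.082Z
      long input = ZonedDateTime.of(1985, 8, 30, 2, 47, 2, 82_000_000, ZoneOffset.UTC)
          .toInstant().toEpochMilli();
      System.out.println(input);   // 494218022082

      // Year floor: 1985-01-01T00:00:00Z
      long year = ZonedDateTime.of(1985, 1, 1, 0, 0, 0, 0, ZoneOffset.UTC)
          .toInstant().toEpochMilli();
      System.out.println(year);    // 473385600000

      // Hour floor: 1985-08-30T02:00:00Z
      long hour = ZonedDateTime.of(1985, 8, 30, 2, 0, 0, 0, ZoneOffset.UTC)
          .toInstant().toEpochMilli();
      System.out.println(hour);    // 494215200000
    }
  }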

http://git-wip-us.apache.org/repos/asf/hive/blob/58d1befa/ql/src/test/queries/clientnegative/druid_address.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/druid_address.q b/ql/src/test/queries/clientnegative/druid_address.q
new file mode 100644
index 0000000..35ba06a
--- /dev/null
+++ b/ql/src/test/queries/clientnegative/druid_address.q
@@ -0,0 +1,5 @@
+set hive.druid.broker.address.default=;
+
+CREATE EXTERNAL TABLE druid_table_1
+STORED BY 'org.apache.hadoop.hive.druid.QTestDruidStorageHandler'
+TBLPROPERTIES ("druid.datasource" = "wikipedia");

http://git-wip-us.apache.org/repos/asf/hive/blob/58d1befa/ql/src/test/queries/clientnegative/druid_buckets.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/druid_buckets.q b/ql/src/test/queries/clientnegative/druid_buckets.q
new file mode 100644
index 0000000..6fc75d1
--- /dev/null
+++ b/ql/src/test/queries/clientnegative/druid_buckets.q
@@ -0,0 +1,6 @@
+set hive.druid.broker.address.default=localhost.test;
+
+CREATE EXTERNAL TABLE druid_table_1
+CLUSTERED BY (robot) INTO 32 BUCKETS
+STORED BY 'org.apache.hadoop.hive.druid.QTestDruidStorageHandler'
+TBLPROPERTIES ("druid.datasource" = "wikipedia");

http://git-wip-us.apache.org/repos/asf/hive/blob/58d1befa/ql/src/test/queries/clientnegative/druid_datasource.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/druid_datasource.q b/ql/src/test/queries/clientnegative/druid_datasource.q
new file mode 100644
index 0000000..87481fd
--- /dev/null
+++ b/ql/src/test/queries/clientnegative/druid_datasource.q
@@ -0,0 +1,3 @@
+CREATE EXTERNAL TABLE druid_table_1
+STORED BY 'org.apache.hadoop.hive.druid.QTestDruidStorageHandler'
+TBLPROPERTIES ("property" = "localhost");

http://git-wip-us.apache.org/repos/asf/hive/blob/58d1befa/ql/src/test/queries/clientnegative/druid_external.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/druid_external.q b/ql/src/test/queries/clientnegative/druid_external.q
new file mode 100644
index 0000000..2de04db
--- /dev/null
+++ b/ql/src/test/queries/clientnegative/druid_external.q
@@ -0,0 +1,5 @@
+set hive.druid.broker.address.default=localhost.test;
+
+CREATE TABLE druid_table_1
+STORED BY 'org.apache.hadoop.hive.druid.QTestDruidStorageHandler'
+TBLPROPERTIES ("druid.datasource" = "wikipedia");

http://git-wip-us.apache.org/repos/asf/hive/blob/58d1befa/ql/src/test/queries/clientnegative/druid_location.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/druid_location.q b/ql/src/test/queries/clientnegative/druid_location.q
new file mode 100644
index 0000000..a9705dc
--- /dev/null
+++ b/ql/src/test/queries/clientnegative/druid_location.q
@@ -0,0 +1,6 @@
+set hive.druid.broker.address.default=localhost.test;
+
+CREATE EXTERNAL TABLE druid_table_1
+STORED BY 'org.apache.hadoop.hive.druid.QTestDruidStorageHandler'
+LOCATION '/testfolder/'
+TBLPROPERTIES ("druid.datasource" = "wikipedia");

http://git-wip-us.apache.org/repos/asf/hive/blob/58d1befa/ql/src/test/queries/clientnegative/druid_partitions.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/druid_partitions.q b/ql/src/test/queries/clientnegative/druid_partitions.q
new file mode 100644
index 0000000..e26a3b6
--- /dev/null
+++ b/ql/src/test/queries/clientnegative/druid_partitions.q
@@ -0,0 +1,6 @@
+set hive.druid.broker.address.default=localhost.test;
+
+CREATE EXTERNAL TABLE druid_table_1
+PARTITIONED BY (dt string)
+STORED BY 'org.apache.hadoop.hive.druid.QTestDruidStorageHandler'
+TBLPROPERTIES ("druid.datasource" = "wikipedia");

http://git-wip-us.apache.org/repos/asf/hive/blob/58d1befa/ql/src/test/queries/clientpositive/druid_basic1.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/druid_basic1.q b/ql/src/test/queries/clientpositive/druid_basic1.q
new file mode 100644
index 0000000..83f5968
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/druid_basic1.q
@@ -0,0 +1,18 @@
+set hive.druid.broker.address.default=localhost.test;
+
+CREATE EXTERNAL TABLE druid_table_1
+STORED BY 'org.apache.hadoop.hive.druid.QTestDruidStorageHandler'
+TBLPROPERTIES ("druid.datasource" = "wikipedia");
+
+DESCRIBE FORMATTED druid_table_1;
+
+-- different table, same datasource
+CREATE EXTERNAL TABLE druid_table_2
+STORED BY 'org.apache.hadoop.hive.druid.QTestDruidStorageHandler'
+TBLPROPERTIES ("druid.datasource" = "wikipedia");
+
+DESCRIBE FORMATTED druid_table_2;
+
+DROP TABLE druid_table_2;
+
+DROP TABLE druid_table_1;

http://git-wip-us.apache.org/repos/asf/hive/blob/58d1befa/ql/src/test/queries/clientpositive/druid_basic2.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/druid_basic2.q b/ql/src/test/queries/clientpositive/druid_basic2.q
new file mode 100644
index 0000000..fe24410
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/druid_basic2.q
@@ -0,0 +1,52 @@
+set hive.strict.checks.cartesian.product=false;
+set hive.druid.broker.address.default=localhost.test;
+
+CREATE EXTERNAL TABLE druid_table_1
+STORED BY 'org.apache.hadoop.hive.druid.QTestDruidStorageHandler'
+TBLPROPERTIES ("druid.datasource" = "wikipedia");
+
+DESCRIBE FORMATTED druid_table_1;
+
+-- dimension
+EXPLAIN EXTENDED
+SELECT robot FROM druid_table_1;
+
+-- metric
+EXPLAIN EXTENDED
+SELECT delta FROM druid_table_1;
+
+EXPLAIN EXTENDED
+SELECT robot
+FROM druid_table_1
+WHERE language = 'en';
+
+EXPLAIN EXTENDED
+SELECT DISTINCT robot
+FROM druid_table_1
+WHERE language = 'en';
+
+-- TODO: currently nothing is pushed - ISNOTNULL
+EXPLAIN EXTENDED
+SELECT a.robot, b.language
+FROM
+(
+  (SELECT robot, language
+  FROM druid_table_1) a
+  JOIN
+  (SELECT language
+  FROM druid_table_1) b
+  ON a.language = b.language
+);
+
+EXPLAIN EXTENDED
+SELECT a.robot, b.language
+FROM
+(
+  (SELECT robot, language
+  FROM druid_table_1
+  WHERE language = 'en') a
+  JOIN
+  (SELECT language
+  FROM druid_table_1) b
+  ON a.language = b.language
+);

http://git-wip-us.apache.org/repos/asf/hive/blob/58d1befa/ql/src/test/queries/clientpositive/druid_intervals.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/druid_intervals.q b/ql/src/test/queries/clientpositive/druid_intervals.q
new file mode 100644
index 0000000..140ff82
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/druid_intervals.q
@@ -0,0 +1,67 @@
+set hive.druid.broker.address.default=localhost.test;
+
+CREATE EXTERNAL TABLE druid_table_1
+STORED BY 'org.apache.hadoop.hive.druid.QTestDruidStorageHandler'
+TBLPROPERTIES ("druid.datasource" = "wikipedia");
+
+DESCRIBE FORMATTED druid_table_1;
+
+-- (-∞‥+∞)
+EXPLAIN
+SELECT `__time`
+FROM druid_table_1;
+
+-- (-∞‥2012-03-01 00:00:00)
+EXPLAIN
+SELECT `__time`
+FROM druid_table_1
+WHERE `__time` < '2012-03-01 00:00:00';
+
+-- [2010-01-01 00:00:00‥2012-03-01 00:00:00)
+EXPLAIN
+SELECT `__time`
+FROM druid_table_1
+WHERE `__time` >= '2010-01-01 00:00:00' AND `__time` <= '2012-03-01 00:00:00';
+
+-- [2010-01-01 00:00:00‥2011-01-01 00:00:00)
+EXPLAIN
+SELECT `__time`
+FROM druid_table_1
+WHERE `__time` >= '2010-01-01 00:00:00' AND `__time` <= '2012-03-01 00:00:00'
+    AND `__time` < '2011-01-01 00:00:00';
+
+-- [2010-01-01 00:00:00‥2011-01-01 00:00:00]
+EXPLAIN
+SELECT `__time`
+FROM druid_table_1
+WHERE `__time` BETWEEN '2010-01-01 00:00:00' AND '2011-01-01 00:00:00';
+
+-- [2010-01-01 00:00:00‥2011-01-01 00:00:00],[2012-01-01 00:00:00‥2013-01-01 00:00:00]
+EXPLAIN
+SELECT `__time`
+FROM druid_table_1
+WHERE (`__time` BETWEEN '2010-01-01 00:00:00' AND '2011-01-01 00:00:00')
+    OR (`__time` BETWEEN '2012-01-01 00:00:00' AND '2013-01-01 00:00:00');
+
+-- OVERLAP [2010-01-01 00:00:00‥2012-01-01 00:00:00]
+EXPLAIN
+SELECT `__time`
+FROM druid_table_1
+WHERE (`__time` BETWEEN '2010-01-01 00:00:00' AND '2011-01-01 00:00:00')
+    OR (`__time` BETWEEN '2010-06-01 00:00:00' AND '2012-01-01 00:00:00');
+
+-- IN: MULTIPLE INTERVALS [2010-01-01 00:00:00‥2010-01-01 00:00:00),[2011-01-01 00:00:00‥2011-01-01 00:00:00)
+EXPLAIN
+SELECT `__time`
+FROM druid_table_1
+WHERE `__time` IN ('2010-01-01 00:00:00','2011-01-01 00:00:00');
+
+EXPLAIN
+SELECT `__time`, robot
+FROM druid_table_1
+WHERE robot = 'user1' AND `__time` IN ('2010-01-01 00:00:00','2011-01-01 00:00:00');
+
+EXPLAIN
+SELECT `__time`, robot
+FROM druid_table_1
+WHERE robot = 'user1' OR `__time` IN ('2010-01-01 00:00:00','2011-01-01 00:00:00');

http://git-wip-us.apache.org/repos/asf/hive/blob/58d1befa/ql/src/test/queries/clientpositive/druid_timeseries.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/druid_timeseries.q b/ql/src/test/queries/clientpositive/druid_timeseries.q
new file mode 100644
index 0000000..c0ad60f
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/druid_timeseries.q
@@ -0,0 +1,94 @@
+set hive.druid.broker.address.default=localhost.test;
+
+CREATE EXTERNAL TABLE druid_table_1
+STORED BY 'org.apache.hadoop.hive.druid.QTestDruidStorageHandler'
+TBLPROPERTIES ("druid.datasource" = "wikipedia");
+
+DESCRIBE FORMATTED druid_table_1;
+
+-- GRANULARITY: ALL
+EXPLAIN
+SELECT max(added), sum(variation)
+FROM druid_table_1;
+
+-- GRANULARITY: NONE
+EXPLAIN
+SELECT `__time`, max(added), sum(variation)
+FROM druid_table_1
+GROUP BY `__time`;
+
+-- GRANULARITY: YEAR
+EXPLAIN
+SELECT floor_year(`__time`), max(added), sum(variation)
+FROM druid_table_1
+GROUP BY floor_year(`__time`);
+
+-- GRANULARITY: QUARTER
+EXPLAIN
+SELECT floor_quarter(`__time`), max(added), sum(variation)
+FROM druid_table_1
+GROUP BY floor_quarter(`__time`);
+
+-- GRANULARITY: MONTH
+EXPLAIN
+SELECT floor_month(`__time`), max(added), sum(variation)
+FROM druid_table_1
+GROUP BY floor_month(`__time`);
+
+-- GRANULARITY: WEEK
+EXPLAIN
+SELECT floor_week(`__time`), max(added), sum(variation)
+FROM druid_table_1
+GROUP BY floor_week(`__time`);
+
+-- GRANULARITY: DAY
+EXPLAIN
+SELECT floor_day(`__time`), max(added), sum(variation)
+FROM druid_table_1
+GROUP BY floor_day(`__time`);
+
+-- GRANULARITY: HOUR
+EXPLAIN
+SELECT floor_hour(`__time`), max(added), sum(variation)
+FROM druid_table_1
+GROUP BY floor_hour(`__time`);
+
+-- GRANULARITY: MINUTE
+EXPLAIN
+SELECT floor_minute(`__time`), max(added), sum(variation)
+FROM druid_table_1
+GROUP BY floor_minute(`__time`);
+
+-- GRANULARITY: SECOND
+EXPLAIN
+SELECT floor_second(`__time`), max(added), sum(variation)
+FROM druid_table_1
+GROUP BY floor_second(`__time`);
+
+-- WITH FILTER ON DIMENSION
+EXPLAIN
+SELECT floor_hour(`__time`), max(added), sum(variation)
+FROM druid_table_1
+WHERE robot='1'
+GROUP BY floor_hour(`__time`);
+
+-- WITH FILTER ON TIME
+EXPLAIN
+SELECT floor_hour(`__time`), max(added), sum(variation)
+FROM druid_table_1
+WHERE floor_hour(`__time`)
+    BETWEEN CAST('2010-01-01 00:00:00' AS TIMESTAMP)
+        AND CAST('2014-01-01 00:00:00' AS TIMESTAMP)
+GROUP BY floor_hour(`__time`);
+
+-- WITH FILTER ON TIME
+EXPLAIN
+SELECT subq.h, subq.m, subq.s
+FROM
+(
+  SELECT floor_hour(`__time`) as h, max(added) as m, sum(variation) as s
+  FROM druid_table_1
+  GROUP BY floor_hour(`__time`)
+) subq
+WHERE subq.h BETWEEN CAST('2010-01-01 00:00:00' AS TIMESTAMP)
+        AND CAST('2014-01-01 00:00:00' AS TIMESTAMP);

http://git-wip-us.apache.org/repos/asf/hive/blob/58d1befa/ql/src/test/queries/clientpositive/druid_topn.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/druid_topn.q b/ql/src/test/queries/clientpositive/druid_topn.q
new file mode 100644
index 0000000..b121b7e
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/druid_topn.q
@@ -0,0 +1,75 @@
+set hive.druid.broker.address.default=localhost.test;
+
+CREATE EXTERNAL TABLE druid_table_1
+STORED BY 'org.apache.hadoop.hive.druid.QTestDruidStorageHandler'
+TBLPROPERTIES ("druid.datasource" = "wikipedia");
+
+DESCRIBE FORMATTED druid_table_1;
+
+-- GRANULARITY: ALL
+EXPLAIN
+SELECT robot, max(added) as m, sum(variation)
+FROM druid_table_1
+GROUP BY robot
+ORDER BY m DESC
+LIMIT 100;
+
+-- GRANULARITY: NONE
+EXPLAIN
+SELECT robot, `__time`, max(added), sum(variation) as s
+FROM druid_table_1
+GROUP BY robot, `__time`
+ORDER BY s DESC
+LIMIT 100;
+
+-- GRANULARITY: YEAR
+EXPLAIN
+SELECT robot, floor_year(`__time`), max(added), sum(variation) as s
+FROM druid_table_1
+GROUP BY robot, floor_year(`__time`)
+ORDER BY s DESC
+LIMIT 10;
+
+-- ASC: TRANSFORM INTO GROUP BY
+EXPLAIN
+SELECT robot, floor_month(`__time`), max(added), sum(variation) as s
+FROM druid_table_1
+GROUP BY robot, floor_month(`__time`)
+ORDER BY s
+LIMIT 10;
+
+-- MULTIPLE ORDER: TRANSFORM INTO GROUP BY
+EXPLAIN
+SELECT robot, floor_month(`__time`), max(added) as m, sum(variation) as s
+FROM druid_table_1
+GROUP BY robot, namespace, floor_month(`__time`)
+ORDER BY s DESC, m DESC
+LIMIT 10;
+
+-- MULTIPLE ORDER MIXED: TRANSFORM INTO GROUP BY
+EXPLAIN
+SELECT robot, floor_month(`__time`), max(added) as m, sum(variation) as s
+FROM druid_table_1
+GROUP BY robot, namespace, floor_month(`__time`)
+ORDER BY robot ASC, m DESC
+LIMIT 10;
+
+-- WITH FILTER ON DIMENSION: TRANSFORM INTO GROUP BY
+EXPLAIN
+SELECT robot, floor_year(`__time`), max(added), sum(variation) as s
+FROM druid_table_1
+WHERE robot='1'
+GROUP BY robot, floor_year(`__time`)
+ORDER BY s
+LIMIT 10;
+
+-- WITH FILTER ON TIME
+EXPLAIN
+SELECT robot, floor_hour(`__time`), max(added) as m, sum(variation)
+FROM druid_table_1
+WHERE floor_hour(`__time`)
+    BETWEEN CAST('2010-01-01 00:00:00' AS TIMESTAMP)
+        AND CAST('2014-01-01 00:00:00' AS TIMESTAMP)
+GROUP BY robot, floor_hour(`__time`)
+ORDER BY m
+LIMIT 100;

http://git-wip-us.apache.org/repos/asf/hive/blob/58d1befa/ql/src/test/results/clientnegative/druid_address.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientnegative/druid_address.q.out b/ql/src/test/results/clientnegative/druid_address.q.out
new file mode 100644
index 0000000..66b7e14
--- /dev/null
+++ b/ql/src/test/results/clientnegative/druid_address.q.out
@@ -0,0 +1,7 @@
+PREHOOK: query: CREATE EXTERNAL TABLE druid_table_1
+STORED BY 'org.apache.hadoop.hive.druid.QTestDruidStorageHandler'
+TBLPROPERTIES ("druid.datasource" = "wikipedia")
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@druid_table_1
+FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. java.lang.RuntimeException: MetaException(message:org.apache.hadoop.hive.serde2.SerDeException Druid broker address not specified in configuration)

http://git-wip-us.apache.org/repos/asf/hive/blob/58d1befa/ql/src/test/results/clientnegative/druid_buckets.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientnegative/druid_buckets.q.out b/ql/src/test/results/clientnegative/druid_buckets.q.out
new file mode 100644
index 0000000..94e4f70
--- /dev/null
+++ b/ql/src/test/results/clientnegative/druid_buckets.q.out
@@ -0,0 +1,8 @@
+PREHOOK: query: CREATE EXTERNAL TABLE druid_table_1
+CLUSTERED BY (robot) INTO 32 BUCKETS
+STORED BY 'org.apache.hadoop.hive.druid.QTestDruidStorageHandler'
+TBLPROPERTIES ("druid.datasource" = "wikipedia")
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@druid_table_1
+FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. MetaException(message:CLUSTERED BY may not be specified for Druid)

http://git-wip-us.apache.org/repos/asf/hive/blob/58d1befa/ql/src/test/results/clientnegative/druid_datasource.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientnegative/druid_datasource.q.out b/ql/src/test/results/clientnegative/druid_datasource.q.out
new file mode 100644
index 0000000..177ffaa
--- /dev/null
+++ b/ql/src/test/results/clientnegative/druid_datasource.q.out
@@ -0,0 +1,7 @@
+PREHOOK: query: CREATE EXTERNAL TABLE druid_table_1
+STORED BY 'org.apache.hadoop.hive.druid.QTestDruidStorageHandler'
+TBLPROPERTIES ("property" = "localhost")
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@druid_table_1
+FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. java.lang.RuntimeException: MetaException(message:org.apache.hadoop.hive.serde2.SerDeException Druid data source not specified; use druid.datasource in table properties)

http://git-wip-us.apache.org/repos/asf/hive/blob/58d1befa/ql/src/test/results/clientnegative/druid_external.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientnegative/druid_external.q.out b/ql/src/test/results/clientnegative/druid_external.q.out
new file mode 100644
index 0000000..e5fac51
--- /dev/null
+++ b/ql/src/test/results/clientnegative/druid_external.q.out
@@ -0,0 +1,7 @@
+PREHOOK: query: CREATE TABLE druid_table_1
+STORED BY 'org.apache.hadoop.hive.druid.QTestDruidStorageHandler'
+TBLPROPERTIES ("druid.datasource" = "wikipedia")
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@druid_table_1
+FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. MetaException(message:Table in Druid needs to be declared as EXTERNAL)

http://git-wip-us.apache.org/repos/asf/hive/blob/58d1befa/ql/src/test/results/clientnegative/druid_location.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientnegative/druid_location.q.out b/ql/src/test/results/clientnegative/druid_location.q.out
new file mode 100644
index 0000000..5727e8c
--- /dev/null
+++ b/ql/src/test/results/clientnegative/druid_location.q.out
@@ -0,0 +1,9 @@
+PREHOOK: query: CREATE EXTERNAL TABLE druid_table_1
+STORED BY 'org.apache.hadoop.hive.druid.QTestDruidStorageHandler'
+#### A masked pattern was here ####
+TBLPROPERTIES ("druid.datasource" = "wikipedia")
+PREHOOK: type: CREATETABLE
+#### A masked pattern was here ####
+PREHOOK: Output: database:default
+PREHOOK: Output: default@druid_table_1
+FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. MetaException(message:LOCATION may not be specified for Druid)

http://git-wip-us.apache.org/repos/asf/hive/blob/58d1befa/ql/src/test/results/clientnegative/druid_partitions.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientnegative/druid_partitions.q.out b/ql/src/test/results/clientnegative/druid_partitions.q.out
new file mode 100644
index 0000000..6fb55c1
--- /dev/null
+++ b/ql/src/test/results/clientnegative/druid_partitions.q.out
@@ -0,0 +1,8 @@
+PREHOOK: query: CREATE EXTERNAL TABLE druid_table_1
+PARTITIONED BY (dt string)
+STORED BY 'org.apache.hadoop.hive.druid.QTestDruidStorageHandler'
+TBLPROPERTIES ("druid.datasource" = "wikipedia")
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@druid_table_1
+FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. MetaException(message:PARTITIONED BY may not be specified for Druid)

http://git-wip-us.apache.org/repos/asf/hive/blob/58d1befa/ql/src/test/results/clientpositive/create_view.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/create_view.q.out b/ql/src/test/results/clientpositive/create_view.q.out
index d9c1e11..7281185 100644
--- a/ql/src/test/results/clientpositive/create_view.q.out
+++ b/ql/src/test/results/clientpositive/create_view.q.out
@@ -190,6 +190,8 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: src
+            properties:
+              insideView TRUE
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
               predicate: (UDFToDouble(key) = 18.0) (type: boolean)

http://git-wip-us.apache.org/repos/asf/hive/blob/58d1befa/ql/src/test/results/clientpositive/druid_basic1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/druid_basic1.q.out b/ql/src/test/results/clientpositive/druid_basic1.q.out
new file mode 100644
index 0000000..74ae9ed
--- /dev/null
+++ b/ql/src/test/results/clientpositive/druid_basic1.q.out
@@ -0,0 +1,142 @@
+PREHOOK: query: CREATE EXTERNAL TABLE druid_table_1
+STORED BY 'org.apache.hadoop.hive.druid.QTestDruidStorageHandler'
+TBLPROPERTIES ("druid.datasource" = "wikipedia")
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@druid_table_1
+POSTHOOK: query: CREATE EXTERNAL TABLE druid_table_1
+STORED BY 'org.apache.hadoop.hive.druid.QTestDruidStorageHandler'
+TBLPROPERTIES ("druid.datasource" = "wikipedia")
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@druid_table_1
+PREHOOK: query: DESCRIBE FORMATTED druid_table_1
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@druid_table_1
+POSTHOOK: query: DESCRIBE FORMATTED druid_table_1
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@druid_table_1
+# col_name            	data_type           	comment             
+	 	 
+__time              	timestamp           	from deserializer   
+robot               	string              	from deserializer   
+namespace           	string              	from deserializer   
+anonymous           	string              	from deserializer   
+unpatrolled         	string              	from deserializer   
+page                	string              	from deserializer   
+language            	string              	from deserializer   
+newpage             	string              	from deserializer   
+user                	string              	from deserializer   
+count               	float               	from deserializer   
+added               	float               	from deserializer   
+delta               	float               	from deserializer   
+variation           	float               	from deserializer   
+deleted             	float               	from deserializer   
+	 	 
+# Detailed Table Information	 	 
+Database:           	default             	 
+#### A masked pattern was here ####
+Retention:          	0                   	 
+#### A masked pattern was here ####
+Table Type:         	EXTERNAL_TABLE      	 
+Table Parameters:	 	 
+	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\"}
+	EXTERNAL            	TRUE                
+	druid.datasource    	wikipedia           
+	numFiles            	0                   
+	numRows             	0                   
+	rawDataSize         	0                   
+	storage_handler     	org.apache.hadoop.hive.druid.QTestDruidStorageHandler
+	totalSize           	0                   
+#### A masked pattern was here ####
+	 	 
+# Storage Information	 	 
+SerDe Library:      	org.apache.hadoop.hive.druid.QTestDruidSerDe	 
+InputFormat:        	null                	 
+OutputFormat:       	null                	 
+Compressed:         	No                  	 
+Num Buckets:        	-1                  	 
+Bucket Columns:     	[]                  	 
+Sort Columns:       	[]                  	 
+Storage Desc Params:	 	 
+	serialization.format	1                   
+PREHOOK: query: -- different table, same datasource
+CREATE EXTERNAL TABLE druid_table_2
+STORED BY 'org.apache.hadoop.hive.druid.QTestDruidStorageHandler'
+TBLPROPERTIES ("druid.datasource" = "wikipedia")
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@druid_table_2
+POSTHOOK: query: -- different table, same datasource
+CREATE EXTERNAL TABLE druid_table_2
+STORED BY 'org.apache.hadoop.hive.druid.QTestDruidStorageHandler'
+TBLPROPERTIES ("druid.datasource" = "wikipedia")
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@druid_table_2
+PREHOOK: query: DESCRIBE FORMATTED druid_table_2
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@druid_table_2
+POSTHOOK: query: DESCRIBE FORMATTED druid_table_2
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@druid_table_2
+# col_name            	data_type           	comment             
+	 	 
+__time              	timestamp           	from deserializer   
+robot               	string              	from deserializer   
+namespace           	string              	from deserializer   
+anonymous           	string              	from deserializer   
+unpatrolled         	string              	from deserializer   
+page                	string              	from deserializer   
+language            	string              	from deserializer   
+newpage             	string              	from deserializer   
+user                	string              	from deserializer   
+count               	float               	from deserializer   
+added               	float               	from deserializer   
+delta               	float               	from deserializer   
+variation           	float               	from deserializer   
+deleted             	float               	from deserializer   
+	 	 
+# Detailed Table Information	 	 
+Database:           	default             	 
+#### A masked pattern was here ####
+Retention:          	0                   	 
+#### A masked pattern was here ####
+Table Type:         	EXTERNAL_TABLE      	 
+Table Parameters:	 	 
+	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\"}
+	EXTERNAL            	TRUE                
+	druid.datasource    	wikipedia           
+	numFiles            	0                   
+	numRows             	0                   
+	rawDataSize         	0                   
+	storage_handler     	org.apache.hadoop.hive.druid.QTestDruidStorageHandler
+	totalSize           	0                   
+#### A masked pattern was here ####
+	 	 
+# Storage Information	 	 
+SerDe Library:      	org.apache.hadoop.hive.druid.QTestDruidSerDe	 
+InputFormat:        	null                	 
+OutputFormat:       	null                	 
+Compressed:         	No                  	 
+Num Buckets:        	-1                  	 
+Bucket Columns:     	[]                  	 
+Sort Columns:       	[]                  	 
+Storage Desc Params:	 	 
+	serialization.format	1                   
+PREHOOK: query: DROP TABLE druid_table_2
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@druid_table_2
+PREHOOK: Output: default@druid_table_2
+POSTHOOK: query: DROP TABLE druid_table_2
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@druid_table_2
+POSTHOOK: Output: default@druid_table_2
+PREHOOK: query: DROP TABLE druid_table_1
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@druid_table_1
+PREHOOK: Output: default@druid_table_1
+POSTHOOK: query: DROP TABLE druid_table_1
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@druid_table_1
+POSTHOOK: Output: default@druid_table_1