Posted to commits@hive.apache.org by ha...@apache.org on 2018/06/09 19:57:26 UTC

hive git commit: HIVE-19796 : Push Down TRUNC Fn to Druid Storage Handler (Slim Bouguerra via Ashutosh Chauhan)

Repository: hive
Updated Branches:
  refs/heads/master 812ba321b -> 2493e4a41


HIVE-19796 : Push Down TRUNC Fn to Druid Storage Handler (Slim Bouguerra via Ashutosh Chauhan)

Signed-off-by: Ashutosh Chauhan <ha...@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/2493e4a4
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/2493e4a4
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/2493e4a4

Branch: refs/heads/master
Commit: 2493e4a41c93a77bc8f9658796fc34340b0bf462
Parents: 812ba32
Author: Slim Bouguerra <sl...@gmail.com>
Authored: Sat Jun 9 12:39:47 2018 -0700
Committer: Ashutosh Chauhan <ha...@apache.org>
Committed: Sat Jun 9 12:57:12 2018 -0700

----------------------------------------------------------------------
 .../reloperators/HiveDateAddSqlOperator.java    |  30 ++
 .../reloperators/HiveDateSubSqlOperator.java    |  30 ++
 .../calcite/reloperators/HiveExtractDate.java   |   6 +-
 .../HiveFromUnixTimeSqlOperator.java            |  32 ++
 .../reloperators/HiveToDateSqlOperator.java     |  37 ++
 .../reloperators/HiveTruncSqlOperator.java      |  36 ++
 .../HiveUnixTimestampSqlOperator.java           |  32 ++
 .../translator/SqlFunctionConverter.java        |  17 +
 .../ql/parse/DruidSqlOperatorConverter.java     | 213 +++++++++++
 .../hive/ql/udf/generic/GenericUDFDateSub.java  |  25 +-
 .../clientpositive/druidmini_expressions.q      |  37 ++
 .../druid/druidmini_expressions.q.out           | 357 +++++++++++++++++++
 .../clientpositive/infer_join_preds.q.out       |  54 +--
 13 files changed, 858 insertions(+), 48 deletions(-)
----------------------------------------------------------------------
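In short, this patch lets the Calcite planner hand several Hive time UDFs (TRUNC, TO_DATE, UNIX_TIMESTAMP/TO_UNIX_TIMESTAMP, FROM_UNIXTIME, DATE_ADD, DATE_SUB) to the Druid storage handler as native Druid expressions instead of evaluating them in Hive. A rough summary of the resulting translations, collected as illustrative Java string constants; the class below is not part of the patch, and the expressions are copied from the druidmini_expressions.q.out plans further down in this mail (which use the US/Pacific session time zone):

// Hypothetical reference class; the expression strings are taken verbatim from the plans below.
public final class TruncPushdownTranslations {
  // TRUNC(CAST(`__time` AS TIMESTAMP), 'MM')
  static final String TRUNC_MONTH =
      "timestamp_format(timestamp_floor(\"__time\",'P1M','','US/Pacific'),'yyyy-MM-dd','US/Pacific')";
  // TO_DATE(`__time`)
  static final String TO_DATE =
      "timestamp_floor(\"__time\",'P1D','','US/Pacific')";
  // UNIX_TIMESTAMP(`__time`) -- __time is already epoch millis, so divide by 1000
  static final String UNIX_TIMESTAMP =
      "div(\"__time\",1000)";
  // DATE_ADD(CAST(`__time` AS DATE), n) -- n is the shift count; DATE_SUB negates it
  static final String DATE_ADD =
      "timestamp_shift(timestamp_floor(\"__time\",'P1D','','US/Pacific'),'P1D',n,'US/Pacific')";
}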


http://git-wip-us.apache.org/repos/asf/hive/blob/2493e4a4/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveDateAddSqlOperator.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveDateAddSqlOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveDateAddSqlOperator.java
new file mode 100644
index 0000000..af9b12e
--- /dev/null
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveDateAddSqlOperator.java
@@ -0,0 +1,30 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.ql.optimizer.calcite.reloperators;
+
+import org.apache.calcite.sql.fun.SqlAbstractTimeFunction;
+import org.apache.calcite.sql.type.SqlTypeName;
+
+public class HiveDateAddSqlOperator extends SqlAbstractTimeFunction {
+  public static final HiveDateAddSqlOperator INSTANCE = new HiveDateAddSqlOperator();
+
+  protected HiveDateAddSqlOperator() {
+    super("DATE_ADD", SqlTypeName.DATE);
+  }
+}

http://git-wip-us.apache.org/repos/asf/hive/blob/2493e4a4/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveDateSubSqlOperator.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveDateSubSqlOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveDateSubSqlOperator.java
new file mode 100644
index 0000000..4f73712
--- /dev/null
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveDateSubSqlOperator.java
@@ -0,0 +1,30 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.ql.optimizer.calcite.reloperators;
+
+import org.apache.calcite.sql.fun.SqlAbstractTimeFunction;
+import org.apache.calcite.sql.type.SqlTypeName;
+
+public class HiveDateSubSqlOperator extends SqlAbstractTimeFunction {
+  public static final HiveDateSubSqlOperator INSTANCE = new HiveDateSubSqlOperator();
+
+  protected HiveDateSubSqlOperator() {
+    super("DATE_SUB", SqlTypeName.DATE);
+  }
+}

http://git-wip-us.apache.org/repos/asf/hive/blob/2493e4a4/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveExtractDate.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveExtractDate.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveExtractDate.java
index a43f406..fe5c242 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveExtractDate.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveExtractDate.java
@@ -17,17 +17,15 @@
  */
 package org.apache.hadoop.hive.ql.optimizer.calcite.reloperators;
 
-import java.util.Set;
-
+import com.google.common.collect.Sets;
 import org.apache.calcite.sql.SqlFunction;
 import org.apache.calcite.sql.SqlFunctionCategory;
 import org.apache.calcite.sql.SqlKind;
-import org.apache.calcite.sql.fun.SqlExtractFunction;
 import org.apache.calcite.sql.type.OperandTypes;
 import org.apache.calcite.sql.type.ReturnTypes;
 import org.apache.calcite.sql.type.SqlTypeTransforms;
 
-import com.google.common.collect.Sets;
+import java.util.Set;
 
 public class HiveExtractDate extends SqlFunction {
 

http://git-wip-us.apache.org/repos/asf/hive/blob/2493e4a4/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveFromUnixTimeSqlOperator.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveFromUnixTimeSqlOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveFromUnixTimeSqlOperator.java
new file mode 100644
index 0000000..22d91b5
--- /dev/null
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveFromUnixTimeSqlOperator.java
@@ -0,0 +1,32 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.ql.optimizer.calcite.reloperators;
+
+import org.apache.calcite.sql.fun.SqlAbstractTimeFunction;
+import org.apache.calcite.sql.type.SqlTypeName;
+
+/**
+ * Calcite SQL operator mapping for the FROM_UNIXTIME Hive UDF.
+ */
+public class HiveFromUnixTimeSqlOperator extends SqlAbstractTimeFunction {
+  public static final HiveFromUnixTimeSqlOperator INSTANCE = new HiveFromUnixTimeSqlOperator();
+  protected HiveFromUnixTimeSqlOperator() {
+    super("FROM_UNIXTIME", SqlTypeName.TIMESTAMP);
+  }
+}

http://git-wip-us.apache.org/repos/asf/hive/blob/2493e4a4/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveToDateSqlOperator.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveToDateSqlOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveToDateSqlOperator.java
new file mode 100644
index 0000000..3f389f6
--- /dev/null
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveToDateSqlOperator.java
@@ -0,0 +1,37 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.ql.optimizer.calcite.reloperators;
+
+import org.apache.calcite.sql.SqlKind;
+import org.apache.calcite.sql.SqlSpecialOperator;
+import org.apache.calcite.sql.type.ReturnTypes;
+
+/**
+ * Class used to map the TO_DATE Hive UDF to the Calcite planner in order to enable push-down optimizations.
+ *
+ */
+public class HiveToDateSqlOperator extends SqlSpecialOperator {
+
+  public static final HiveToDateSqlOperator INSTANCE = new HiveToDateSqlOperator();
+
+  private HiveToDateSqlOperator() {
+    super("TO_DATE", SqlKind.OTHER_FUNCTION, 30, true, ReturnTypes.DATE,
+        null, null);
+  }
+}

http://git-wip-us.apache.org/repos/asf/hive/blob/2493e4a4/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveTruncSqlOperator.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveTruncSqlOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveTruncSqlOperator.java
new file mode 100644
index 0000000..4299237
--- /dev/null
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveTruncSqlOperator.java
@@ -0,0 +1,36 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.ql.optimizer.calcite.reloperators;
+
+import org.apache.calcite.sql.SqlKind;
+import org.apache.calcite.sql.SqlSpecialOperator;
+import org.apache.calcite.sql.type.ReturnTypes;
+
+/**
+ * Class used to map the Hive TRUNC SQL operator during Calcite plan optimization.
+ */
+public class HiveTruncSqlOperator extends SqlSpecialOperator {
+  public static HiveTruncSqlOperator INSTANCE =  new HiveTruncSqlOperator();
+  private HiveTruncSqlOperator(){
+    super("TRUNC", SqlKind.OTHER_FUNCTION, 30, true, ReturnTypes.ARG0_OR_EXACT_NO_SCALE,
+        null, null
+    );
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/hive/blob/2493e4a4/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveUnixTimestampSqlOperator.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveUnixTimestampSqlOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveUnixTimestampSqlOperator.java
new file mode 100644
index 0000000..f5a549c
--- /dev/null
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveUnixTimestampSqlOperator.java
@@ -0,0 +1,32 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.ql.optimizer.calcite.reloperators;
+
+import org.apache.calcite.sql.fun.SqlAbstractTimeFunction;
+import org.apache.calcite.sql.type.SqlTypeName;
+
+/**
+ * SQL UNIX_TIMESTAMP Calcite operator.
+ */
+public class HiveUnixTimestampSqlOperator extends SqlAbstractTimeFunction {
+  public static final HiveUnixTimestampSqlOperator INSTANCE = new HiveUnixTimestampSqlOperator();
+  protected HiveUnixTimestampSqlOperator() {
+    super("UNIX_TIMESTAMP", SqlTypeName.BIGINT);
+  }
+}

http://git-wip-us.apache.org/repos/asf/hive/blob/2493e4a4/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/SqlFunctionConverter.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/SqlFunctionConverter.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/SqlFunctionConverter.java
index 37b387f..5626471 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/SqlFunctionConverter.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/SqlFunctionConverter.java
@@ -51,9 +51,15 @@ import org.apache.hadoop.hive.ql.optimizer.calcite.functions.HiveSqlSumAggFuncti
 import org.apache.hadoop.hive.ql.optimizer.calcite.functions.HiveSqlVarianceAggFunction;
 import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveBetween;
 import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveConcat;
+import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveDateAddSqlOperator;
+import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveDateSubSqlOperator;
 import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveExtractDate;
 import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveFloorDate;
+import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveFromUnixTimeSqlOperator;
 import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveIn;
+import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveToDateSqlOperator;
+import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveTruncSqlOperator;
+import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveUnixTimestampSqlOperator;
 import org.apache.hadoop.hive.ql.parse.ASTNode;
 import org.apache.hadoop.hive.ql.parse.HiveParser;
 import org.apache.hadoop.hive.ql.parse.ParseDriver;
@@ -439,6 +445,17 @@ public class SqlFunctionConverter {
       registerFunction("length", SqlStdOperatorTable.CHARACTER_LENGTH,
           hToken(HiveParser.Identifier, "length")
       );
+      registerFunction("trunc", HiveTruncSqlOperator.INSTANCE, hToken(HiveParser.Identifier, "trunc"));
+      registerFunction("to_date", HiveToDateSqlOperator.INSTANCE, hToken(HiveParser.Identifier, "to_date"));
+      registerFunction("date_add", SqlStdOperatorTable.DATETIME_PLUS, hToken(HiveParser.Identifier, "date_add"));
+      registerFunction("to_unix_timestamp", HiveUnixTimestampSqlOperator.INSTANCE,
+          hToken(HiveParser.Identifier, "to_unix_timestamp")
+      );
+      registerFunction("from_unixtime", HiveFromUnixTimeSqlOperator.INSTANCE,
+          hToken(HiveParser.Identifier, "from_unixtime")
+      );
+      registerFunction("date_add", HiveDateAddSqlOperator.INSTANCE, hToken(HiveParser.Identifier, "date_add"));
+      registerFunction("date_sub", HiveDateSubSqlOperator.INSTANCE, hToken(HiveParser.Identifier, "date_sub"));
     }
 
     private void registerFunction(String name, SqlOperator calciteFn, HiveToken hiveToken) {

http://git-wip-us.apache.org/repos/asf/hive/blob/2493e4a4/ql/src/java/org/apache/hadoop/hive/ql/parse/DruidSqlOperatorConverter.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/DruidSqlOperatorConverter.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/DruidSqlOperatorConverter.java
index ce04eec..2b1d0d3 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/DruidSqlOperatorConverter.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/DruidSqlOperatorConverter.java
@@ -19,6 +19,8 @@
 package org.apache.hadoop.hive.ql.parse;
 
 import com.google.common.base.Function;
+import com.google.common.base.Preconditions;
+import com.google.common.collect.ImmutableList;
 import com.google.common.collect.Maps;
 import org.apache.calcite.adapter.druid.DirectOperatorConversion;
 import org.apache.calcite.adapter.druid.DruidExpressions;
@@ -26,6 +28,7 @@ import org.apache.calcite.adapter.druid.DruidQuery;
 import org.apache.calcite.adapter.druid.ExtractOperatorConversion;
 import org.apache.calcite.adapter.druid.FloorOperatorConversion;
 import org.apache.calcite.adapter.druid.UnarySuffixOperatorConversion;
+import org.apache.calcite.config.CalciteConnectionConfig;
 import org.apache.calcite.rel.type.RelDataType;
 import org.apache.calcite.rex.RexCall;
 import org.apache.calcite.rex.RexLiteral;
@@ -34,13 +37,22 @@ import org.apache.calcite.sql.SqlFunction;
 import org.apache.calcite.sql.SqlKind;
 import org.apache.calcite.sql.SqlOperator;
 import org.apache.calcite.sql.fun.SqlStdOperatorTable;
+import org.apache.calcite.sql.type.SqlTypeUtil;
 import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveConcat;
+import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveDateAddSqlOperator;
+import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveDateSubSqlOperator;
 import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveExtractDate;
 import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveFloorDate;
+import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveFromUnixTimeSqlOperator;
+import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveToDateSqlOperator;
+import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveTruncSqlOperator;
+import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveUnixTimestampSqlOperator;
+import org.joda.time.Period;
 
 import javax.annotation.Nullable;
 import java.util.HashMap;
 import java.util.Map;
+import java.util.TimeZone;
 
 /**
  * Contains custom Druid SQL operator converter classes, contains either:
@@ -48,6 +60,10 @@ import java.util.Map;
  * Some temporary OperatorConversion that is not release by Calcite yet
  */
 public class DruidSqlOperatorConverter {
+
+  private static final String YYYY_MM_DD = "yyyy-MM-dd";
+  public static final String DEFAULT_TS_FORMAT = "yyyy-MM-dd HH:mm:ss";
+
   private DruidSqlOperatorConverter() {
   }
 
@@ -75,6 +91,12 @@ public class DruidSqlOperatorConverter {
       druidOperatorMap.put(SqlStdOperatorTable.IS_NOT_NULL,
           new UnarySuffixOperatorConversion(SqlStdOperatorTable.IS_NOT_NULL, "notnull")
       );
+      druidOperatorMap.put(HiveTruncSqlOperator.INSTANCE, new DruidDateTruncOperatorConversion());
+      druidOperatorMap.put(HiveToDateSqlOperator.INSTANCE, new DruidToDateOperatorConversion());
+      druidOperatorMap.put(HiveFromUnixTimeSqlOperator.INSTANCE, new DruidFormUnixTimeOperatorConversion());
+      druidOperatorMap.put(HiveUnixTimestampSqlOperator.INSTANCE, new DruidUnixTimestampOperatorConversion());
+      druidOperatorMap.put(HiveDateAddSqlOperator.INSTANCE, new DruidDateArithmeticOperatorConversion(1, HiveDateAddSqlOperator.INSTANCE));
+      druidOperatorMap.put(HiveDateSubSqlOperator.INSTANCE, new DruidDateArithmeticOperatorConversion(-1, HiveDateSubSqlOperator.INSTANCE));
     }
     return druidOperatorMap;
   }
@@ -126,4 +148,195 @@ public class DruidSqlOperatorConverter {
       return DruidQuery.format("substring(%s, %s, %s)", arg, indexStart, length);
     }
   }
+
+  /**
+   * Operator conversion from the Hive TRUNC UDF to Druid date-time functions.
+   */
+  public static class DruidDateTruncOperatorConversion
+      implements org.apache.calcite.adapter.druid.DruidSqlOperatorConverter {
+
+    @Override public SqlOperator calciteOperator() {
+      return HiveTruncSqlOperator.INSTANCE;
+    }
+
+    @Nullable @Override public String toDruidExpression(RexNode rexNode, RelDataType rowType, DruidQuery query
+    ) {
+      final RexCall call = (RexCall) rexNode;
+      // Can only handle trunc() over a date/timestamp operand.
+      if (call.getOperands().size() < 1) {
+        throw new IllegalStateException("trunc() requires at least 1 argument, got " + call.getOperands().size());
+      }
+      if (call.getOperands().size() == 1) {
+        final String arg = DruidExpressions.toDruidExpression(call.getOperands().get(0), rowType, query);
+        if (arg == null) {
+          return null;
+        }
+        if (SqlTypeUtil.isDatetime(call.getOperands().get(0).getType())) {
+          return applyTimestampFormat(
+              DruidExpressions.applyTimestampFloor(arg, Period.days(1).toString(), "", timezoneId(query)), YYYY_MM_DD,
+              timezoneId(query)
+          );
+        }
+        return null;
+      } else if (call.getOperands().size() == 2) {
+        final String arg = DruidExpressions.toDruidExpression(call.getOperands().get(0), rowType, query);
+        if (arg == null) {
+          return null;
+        }
+        String granularity = DruidExpressions.toDruidExpression(call.getOperands().get(1), rowType, query);
+        if (granularity == null) {
+          return null;
+        }
+        final String unit;
+        if ("'MONTH'".equals(granularity) || "'MON'".equals(granularity) || "'MM'".equals(granularity)) {
+          unit = Period.months(1).toString();
+        } else if ("'YEAR'".equals(granularity) || "'YYYY'".equals(granularity) || "'YY'".equals(granularity)) {
+          unit = Period.years(1).toString();
+        } else if ("'QUARTER'".equals(granularity) || "'Q'".equals(granularity)) {
+          unit = Period.months(3).toString();
+        } else {
+          unit = null;
+        }
+        if (unit == null) {
+          //bail out can not infer unit
+          return null;
+        }
+        return applyTimestampFormat(DruidExpressions.applyTimestampFloor(arg, unit, "", timezoneId(query)), YYYY_MM_DD,
+            timezoneId(query)
+        );
+      }
+      return null;
+    }
+  }
+
+  /**
+   * Expression operator conversion from the Hive TO_DATE operator to a Druid date cast.
+   */
+  public static class DruidToDateOperatorConversion
+      implements org.apache.calcite.adapter.druid.DruidSqlOperatorConverter {
+
+    @Override public SqlOperator calciteOperator() {
+      return HiveToDateSqlOperator.INSTANCE;
+    }
+
+    @Nullable @Override public String toDruidExpression(RexNode rexNode, RelDataType rowType, DruidQuery query
+    ) {
+      final RexCall call = (RexCall) rexNode;
+      if (call.getOperands().size() != 1) {
+        throw new IllegalStateException("to_date() requires 1 argument, got " + call.getOperands().size());
+      }
+      final String arg = DruidExpressions.toDruidExpression(call.getOperands().get(0), rowType, query);
+      if (arg == null) {
+        return null;
+      }
+      return DruidExpressions.applyTimestampFloor(arg, Period.days(1).toString(), "", timezoneId(query));
+    }
+  }
+
+  public static class DruidUnixTimestampOperatorConversion
+      implements org.apache.calcite.adapter.druid.DruidSqlOperatorConverter {
+
+    @Override public SqlOperator calciteOperator() {
+      return HiveUnixTimestampSqlOperator.INSTANCE;
+    }
+
+    @Nullable @Override public String toDruidExpression(RexNode rexNode, RelDataType rowType, DruidQuery query
+    ) {
+      final RexCall call = (RexCall) rexNode;
+      final String arg0 = DruidExpressions.toDruidExpression(call.getOperands().get(0), rowType, query);
+      if (arg0 == null) {
+        return null;
+      }
+      if (SqlTypeUtil.isDatetime((call.getOperands().get(0).getType()))) {
+        // Timestamps are stored internally as epoch millis; just divide by 1000 to get seconds.
+        return DruidExpressions.functionCall("div", ImmutableList.of(arg0, DruidExpressions.numberLiteral(1000)));
+      }
+      // dealing with String type
+      final String format = call.getOperands().size() == 2 ? DruidExpressions
+          .toDruidExpression(call.getOperands().get(1), rowType, query) : DEFAULT_TS_FORMAT;
+      return DruidExpressions.functionCall("unix_timestamp", ImmutableList.of(arg0, DruidExpressions.stringLiteral(format)));
+    }
+  }
+
+  public static class DruidFormUnixTimeOperatorConversion
+      implements org.apache.calcite.adapter.druid.DruidSqlOperatorConverter {
+
+    @Override public SqlOperator calciteOperator() {
+      return HiveFromUnixTimeSqlOperator.INSTANCE;
+    }
+
+    @Nullable @Override public String toDruidExpression(RexNode rexNode, RelDataType rowType, DruidQuery query
+    ) {
+      final RexCall call = (RexCall) rexNode;
+      if (call.getOperands().size() < 1 || call.getOperands().size() > 2) {
+        throw new IllegalStateException("form_unixtime() requires 1 or 2 argument, got " + call.getOperands().size());
+      }
+      final String arg = DruidExpressions.toDruidExpression(call.getOperands().get(0), rowType, query);
+      if (arg == null) {
+        return null;
+      }
+
+      final String numMillis = DruidQuery.format("(%s * '1000')", arg);
+      final String format = call.getOperands().size() == 1 ? DEFAULT_TS_FORMAT : DruidExpressions
+          .toDruidExpression(call.getOperands().get(1), rowType, query);
+      return applyTimestampFormat(numMillis, format, timezoneId(query));
+    }
+  }
+
+  /**
+   * Operator conversion for the Hive DATE_ADD/DATE_SUB UDFs to a Druid timestamp_shift expression.
+   */
+  public static class DruidDateArithmeticOperatorConversion
+      implements org.apache.calcite.adapter.druid.DruidSqlOperatorConverter {
+
+    private final int direction;
+    private final SqlOperator operator;
+
+    public DruidDateArithmeticOperatorConversion(int direction, SqlOperator operator) {
+      this.direction = direction;
+      this.operator = operator;
+      Preconditions.checkArgument(direction == 1 || direction == -1);
+    }
+
+    @Override public SqlOperator calciteOperator() {
+      return operator;
+    }
+
+    @Nullable @Override public String toDruidExpression(RexNode rexNode, RelDataType rowType, DruidQuery query
+    ) {
+      final RexCall call = (RexCall) rexNode;
+      if (call.getOperands().size() != 2) {
+        throw new IllegalStateException("date_add/date_sub() requires 2 arguments, got " + call.getOperands().size());
+      }
+      final String arg0 = DruidExpressions.toDruidExpression(call.getOperands().get(0), rowType, query);
+      final String arg1 = DruidExpressions.toDruidExpression(call.getOperands().get(1), rowType, query);
+      if (arg0 == null || arg1 == null) {
+        return null;
+      }
+
+      final String steps = direction == -1 ? DruidQuery.format("-( %s )", arg1) : arg1;
+      return DruidExpressions.functionCall("timestamp_shift", ImmutableList
+          .of(arg0, DruidExpressions.stringLiteral("P1D"), steps,
+              DruidExpressions.stringLiteral(timezoneId(query).getID())
+          ));
+    }
+  }
+
+
+  /**
+   * utility function to extract timezone id from Druid query
+   * @param query Druid Rel
+   * @return time zone
+   */
+  private static TimeZone timezoneId(final DruidQuery query) {
+    return TimeZone.getTimeZone(
+        query.getTopNode().getCluster().getPlanner().getContext().unwrap(CalciteConnectionConfig.class).timeZone());
+  }
+
+  private static String applyTimestampFormat(String arg, String format, TimeZone timeZone) {
+    return DruidExpressions.functionCall("timestamp_format",
+        ImmutableList.of(arg, DruidExpressions.stringLiteral(format), DruidExpressions.stringLiteral(timeZone.getID()))
+    );
+  }
+
 }

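For readers who want to see the TRUNC handling above in isolation, here is a minimal standalone sketch of what DruidDateTruncOperatorConversion does: map the granularity literal to an ISO-8601 period via Joda-Time, floor the column to that period, and format the result back to 'yyyy-MM-dd'. The class and helper names below are invented for illustration and are not part of the patch; it assumes only Joda-Time on the classpath (which the converter itself already imports). Running it prints the same expression that appears in the druidmini_expressions.q.out plan below.

import org.joda.time.Period;

// Illustrative sketch of the TRUNC granularity handling; helper names are hypothetical,
// but the emitted expression shape matches the plans shown later in this mail.
public final class TruncExpressionSketch {

  /** Map the Hive TRUNC granularity literal (as Calcite renders it, quoted) to an ISO period. */
  static String granularityToPeriod(String granularity) {
    if ("'MONTH'".equals(granularity) || "'MON'".equals(granularity) || "'MM'".equals(granularity)) {
      return Period.months(1).toString();   // "P1M"
    } else if ("'YEAR'".equals(granularity) || "'YYYY'".equals(granularity) || "'YY'".equals(granularity)) {
      return Period.years(1).toString();    // "P1Y"
    } else if ("'QUARTER'".equals(granularity) || "'Q'".equals(granularity)) {
      return Period.months(3).toString();   // "P3M"
    }
    return null;                            // unknown unit: bail out, no pushdown
  }

  /** Compose the Druid expression: floor to the period, then format as a date string. */
  static String truncExpression(String columnExpr, String granularity, String timeZoneId) {
    String period = granularityToPeriod(granularity);
    if (period == null) {
      return null;
    }
    String floored = String.format("timestamp_floor(%s,'%s','','%s')", columnExpr, period, timeZoneId);
    return String.format("timestamp_format(%s,'yyyy-MM-dd','%s')", floored, timeZoneId);
  }

  public static void main(String[] args) {
    // Prints: timestamp_format(timestamp_floor("__time",'P1M','','US/Pacific'),'yyyy-MM-dd','US/Pacific')
    System.out.println(truncExpression("\"__time\"", "'MM'", "US/Pacific"));
  }
}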
http://git-wip-us.apache.org/repos/asf/hive/blob/2493e4a4/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFDateSub.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFDateSub.java b/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFDateSub.java
index 2fef893..eaab703 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFDateSub.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFDateSub.java
@@ -17,34 +17,13 @@
  */
 package org.apache.hadoop.hive.ql.udf.generic;
 
-import java.sql.Timestamp;
-import java.text.ParseException;
-import java.text.SimpleDateFormat;
-import java.util.Calendar;
-import java.util.Date;
-
 import org.apache.hadoop.hive.ql.exec.Description;
-import org.apache.hadoop.hive.ql.exec.UDFArgumentException;
-import org.apache.hadoop.hive.ql.exec.UDFArgumentLengthException;
-import org.apache.hadoop.hive.ql.exec.UDFArgumentTypeException;
 import org.apache.hadoop.hive.ql.exec.vector.VectorizedExpressions;
 import org.apache.hadoop.hive.ql.exec.vector.expressions.VectorUDFDateSubColCol;
 import org.apache.hadoop.hive.ql.exec.vector.expressions.VectorUDFDateSubColScalar;
 import org.apache.hadoop.hive.ql.exec.vector.expressions.VectorUDFDateSubScalarCol;
-import org.apache.hadoop.hive.ql.metadata.HiveException;
-import org.apache.hadoop.hive.serde2.io.ByteWritable;
-import org.apache.hadoop.hive.serde2.io.DateWritable;
-import org.apache.hadoop.hive.serde2.io.ShortWritable;
-import org.apache.hadoop.hive.serde2.io.TimestampWritable;
-import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
-import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorConverters;
-import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorConverters.Converter;
-import org.apache.hadoop.hive.serde2.objectinspector.PrimitiveObjectInspector;
-import org.apache.hadoop.hive.serde2.objectinspector.PrimitiveObjectInspector.PrimitiveCategory;
-import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorConverter.TimestampConverter;
-import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory;
-import org.apache.hadoop.io.Text;
-import org.apache.hadoop.io.IntWritable;
+
+import java.text.SimpleDateFormat;
 
 /**
  * UDFDateSub.

http://git-wip-us.apache.org/repos/asf/hive/blob/2493e4a4/ql/src/test/queries/clientpositive/druidmini_expressions.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/druidmini_expressions.q b/ql/src/test/queries/clientpositive/druidmini_expressions.q
index 72e6256..d88b281 100644
--- a/ql/src/test/queries/clientpositive/druidmini_expressions.q
+++ b/ql/src/test/queries/clientpositive/druidmini_expressions.q
@@ -81,4 +81,41 @@ select count(DISTINCT cstring2), sum(cdouble) FROM druid_table_n0 ;
 
 select count(distinct cstring2 || '_'|| cstring1), sum(cdouble), min(cint) FROM druid_table_n0;
 
+explain select unix_timestamp(from_unixtime(1396681200)) from druid_table_n0 limit 1;
+select unix_timestamp(from_unixtime(1396681200)) from druid_table_n0 limit 1;
+
+explain select unix_timestamp(`__time`) from druid_table_n0 limit 1;
+select unix_timestamp(`__time`) from druid_table_n0 limit 1;
+
+explain select TRUNC(cast(`__time` as timestamp), 'YY') from druid_table_n0 GROUP BY TRUNC(cast(`__time` as timestamp), 'YY');
+select TRUNC(cast(`__time` as timestamp), 'YY') from druid_table_n0 GROUP BY TRUNC(cast(`__time` as timestamp), 'YY');
+select TRUNC(cast(`__time` as timestamp), 'YEAR') from druid_table_n0 GROUP BY TRUNC(cast(`__time` as timestamp), 'YEAR');
+select TRUNC(cast(`__time` as timestamp), 'YYYY') from druid_table_n0 GROUP BY TRUNC(cast(`__time` as timestamp), 'YYYY');
+
+explain select TRUNC(cast(`__time` as timestamp), 'MONTH') from druid_table_n0 GROUP BY TRUNC(cast(`__time` as timestamp), 'MONTH');
+select TRUNC(cast(`__time` as timestamp), 'MONTH') from druid_table_n0 GROUP BY TRUNC(cast(`__time` as timestamp), 'MONTH');
+select TRUNC(cast(`__time` as timestamp), 'MM') from druid_table_n0 GROUP BY TRUNC(cast(`__time` as timestamp), 'MM');
+select TRUNC(cast(`__time` as timestamp), 'MON') from druid_table_n0 GROUP BY TRUNC(cast(`__time` as timestamp), 'MON');
+
+explain select TRUNC(cast(`__time` as timestamp), 'QUARTER') from druid_table_n0 GROUP BY TRUNC(cast(`__time` as timestamp), 'QUARTER');
+select TRUNC(cast(`__time` as timestamp), 'QUARTER') from druid_table_n0 GROUP BY TRUNC(cast(`__time` as timestamp), 'QUARTER');
+select TRUNC(cast(`__time` as timestamp), 'Q') from druid_table_n0 GROUP BY TRUNC(cast(`__time` as timestamp), 'Q');
+
+explain select TO_DATE(`__time`) from druid_table_n0 GROUP BY TO_DATE(`__time`);
+select TO_DATE(`__time`) from druid_table_n0 GROUP BY TO_DATE(`__time`);
+
+EXPLAIN SELECT SUM((`druid_table_alias`.`cdouble` * `druid_table_alias`.`cdouble`)) AS `sum_calculation_4998925219892510720_ok`,
+  CAST(TRUNC(CAST(`druid_table_alias`.`__time` AS TIMESTAMP),'MM') AS DATE) AS `tmn___time_ok`
+FROM `default`.`druid_table_n0` `druid_table_alias`
+GROUP BY CAST(TRUNC(CAST(`druid_table_alias`.`__time` AS TIMESTAMP),'MM') AS DATE);
+
+SELECT SUM((`druid_table_alias`.`cdouble` * `druid_table_alias`.`cdouble`)) AS `sum_calculation_4998925219892510720_ok`,
+  CAST(TRUNC(CAST(`druid_table_alias`.`__time` AS TIMESTAMP),'MM') AS DATE) AS `tmn___time_ok`
+FROM `default`.`druid_table_n0` `druid_table_alias`
+GROUP BY CAST(TRUNC(CAST(`druid_table_alias`.`__time` AS TIMESTAMP),'MM') AS DATE);
+
+explain SELECT DATE_ADD(cast(`__time` as date), CAST((cdouble / 1000) AS INT)) as date_1,  DATE_SUB(cast(`__time` as date), CAST((cdouble / 1000) AS INT)) as date_2 from druid_table_n0  order by date_1, date_2 limit 3;
+SELECT DATE_ADD(cast(`__time` as date), CAST((cdouble / 1000) AS INT)) as date_1,  DATE_SUB(cast(`__time` as date), CAST((cdouble / 1000) AS INT)) as date_2 from druid_table_n0  order by date_1, date_2 limit 3;
+
+
 DROP TABLE druid_table_n0;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/2493e4a4/ql/src/test/results/clientpositive/druid/druidmini_expressions.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/druid/druidmini_expressions.q.out b/ql/src/test/results/clientpositive/druid/druidmini_expressions.q.out
index 30bc5d0..c1e54ea 100644
--- a/ql/src/test/results/clientpositive/druid/druidmini_expressions.q.out
+++ b/ql/src/test/results/clientpositive/druid/druidmini_expressions.q.out
@@ -889,6 +889,363 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@druid_table_n0
 POSTHOOK: Output: hdfs://### HDFS PATH ###
 6095	2.7308662809692383E7	-1073279343
+PREHOOK: query: explain select unix_timestamp(from_unixtime(1396681200)) from druid_table_n0 limit 1
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select unix_timestamp(from_unixtime(1396681200)) from druid_table_n0 limit 1
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        TableScan
+          alias: druid_table_n0
+          properties:
+            druid.fieldNames vc
+            druid.fieldTypes bigint
+            druid.query.json {"queryType":"scan","dataSource":"default.druid_table_n0","intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"],"virtualColumns":[{"type":"expression","name":"vc","expression":"unix_timestamp(timestamp_format((1396681200 * '1000'),'yyyy-MM-dd HH:mm:ss','US/Pacific'),'yyyy-MM-dd HH:mm:ss')","outputType":"LONG"}],"columns":["vc"],"resultFormat":"compactedList","limit":1}
+            druid.query.type scan
+          Select Operator
+            expressions: vc (type: bigint)
+            outputColumnNames: _col0
+            ListSink
+
+PREHOOK: query: select unix_timestamp(from_unixtime(1396681200)) from druid_table_n0 limit 1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@druid_table_n0
+PREHOOK: Output: hdfs://### HDFS PATH ###
+POSTHOOK: query: select unix_timestamp(from_unixtime(1396681200)) from druid_table_n0 limit 1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@druid_table_n0
+POSTHOOK: Output: hdfs://### HDFS PATH ###
+1396656000
+PREHOOK: query: explain select unix_timestamp(`__time`) from druid_table_n0 limit 1
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select unix_timestamp(`__time`) from druid_table_n0 limit 1
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        TableScan
+          alias: druid_table_n0
+          properties:
+            druid.fieldNames vc
+            druid.fieldTypes bigint
+            druid.query.json {"queryType":"scan","dataSource":"default.druid_table_n0","intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"],"virtualColumns":[{"type":"expression","name":"vc","expression":"div(\"__time\",1000)","outputType":"LONG"}],"columns":["vc"],"resultFormat":"compactedList","limit":1}
+            druid.query.type scan
+          Select Operator
+            expressions: vc (type: bigint)
+            outputColumnNames: _col0
+            ListSink
+
+PREHOOK: query: select unix_timestamp(`__time`) from druid_table_n0 limit 1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@druid_table_n0
+PREHOOK: Output: hdfs://### HDFS PATH ###
+POSTHOOK: query: select unix_timestamp(`__time`) from druid_table_n0 limit 1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@druid_table_n0
+POSTHOOK: Output: hdfs://### HDFS PATH ###
+-60
+PREHOOK: query: explain select TRUNC(cast(`__time` as timestamp), 'YY') from druid_table_n0 GROUP BY TRUNC(cast(`__time` as timestamp), 'YY')
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select TRUNC(cast(`__time` as timestamp), 'YY') from druid_table_n0 GROUP BY TRUNC(cast(`__time` as timestamp), 'YY')
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        TableScan
+          alias: druid_table_n0
+          properties:
+            druid.fieldNames vc
+            druid.fieldTypes string
+            druid.query.json {"queryType":"groupBy","dataSource":"default.druid_table_n0","granularity":"all","dimensions":[{"type":"default","dimension":"vc","outputName":"vc","outputType":"STRING"}],"virtualColumns":[{"type":"expression","name":"vc","expression":"timestamp_format(timestamp_floor(\"__time\",'P1Y','','US/Pacific'),'yyyy-MM-dd','US/Pacific')","outputType":"STRING"}],"limitSpec":{"type":"default"},"aggregations":[],"intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"]}
+            druid.query.type groupBy
+          Select Operator
+            expressions: vc (type: string)
+            outputColumnNames: _col0
+            ListSink
+
+PREHOOK: query: select TRUNC(cast(`__time` as timestamp), 'YY') from druid_table_n0 GROUP BY TRUNC(cast(`__time` as timestamp), 'YY')
+PREHOOK: type: QUERY
+PREHOOK: Input: default@druid_table_n0
+PREHOOK: Output: hdfs://### HDFS PATH ###
+POSTHOOK: query: select TRUNC(cast(`__time` as timestamp), 'YY') from druid_table_n0 GROUP BY TRUNC(cast(`__time` as timestamp), 'YY')
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@druid_table_n0
+POSTHOOK: Output: hdfs://### HDFS PATH ###
+1969-01-01
+PREHOOK: query: select TRUNC(cast(`__time` as timestamp), 'YEAR') from druid_table_n0 GROUP BY TRUNC(cast(`__time` as timestamp), 'YEAR')
+PREHOOK: type: QUERY
+PREHOOK: Input: default@druid_table_n0
+PREHOOK: Output: hdfs://### HDFS PATH ###
+POSTHOOK: query: select TRUNC(cast(`__time` as timestamp), 'YEAR') from druid_table_n0 GROUP BY TRUNC(cast(`__time` as timestamp), 'YEAR')
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@druid_table_n0
+POSTHOOK: Output: hdfs://### HDFS PATH ###
+1969-01-01
+PREHOOK: query: select TRUNC(cast(`__time` as timestamp), 'YYYY') from druid_table_n0 GROUP BY TRUNC(cast(`__time` as timestamp), 'YYYY')
+PREHOOK: type: QUERY
+PREHOOK: Input: default@druid_table_n0
+PREHOOK: Output: hdfs://### HDFS PATH ###
+POSTHOOK: query: select TRUNC(cast(`__time` as timestamp), 'YYYY') from druid_table_n0 GROUP BY TRUNC(cast(`__time` as timestamp), 'YYYY')
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@druid_table_n0
+POSTHOOK: Output: hdfs://### HDFS PATH ###
+1969-01-01
+PREHOOK: query: explain select TRUNC(cast(`__time` as timestamp), 'MONTH') from druid_table_n0 GROUP BY TRUNC(cast(`__time` as timestamp), 'MONTH')
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select TRUNC(cast(`__time` as timestamp), 'MONTH') from druid_table_n0 GROUP BY TRUNC(cast(`__time` as timestamp), 'MONTH')
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        TableScan
+          alias: druid_table_n0
+          properties:
+            druid.fieldNames vc
+            druid.fieldTypes string
+            druid.query.json {"queryType":"groupBy","dataSource":"default.druid_table_n0","granularity":"all","dimensions":[{"type":"default","dimension":"vc","outputName":"vc","outputType":"STRING"}],"virtualColumns":[{"type":"expression","name":"vc","expression":"timestamp_format(timestamp_floor(\"__time\",'P1M','','US/Pacific'),'yyyy-MM-dd','US/Pacific')","outputType":"STRING"}],"limitSpec":{"type":"default"},"aggregations":[],"intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"]}
+            druid.query.type groupBy
+          Select Operator
+            expressions: vc (type: string)
+            outputColumnNames: _col0
+            ListSink
+
+PREHOOK: query: select TRUNC(cast(`__time` as timestamp), 'MONTH') from druid_table_n0 GROUP BY TRUNC(cast(`__time` as timestamp), 'MONTH')
+PREHOOK: type: QUERY
+PREHOOK: Input: default@druid_table_n0
+PREHOOK: Output: hdfs://### HDFS PATH ###
+POSTHOOK: query: select TRUNC(cast(`__time` as timestamp), 'MONTH') from druid_table_n0 GROUP BY TRUNC(cast(`__time` as timestamp), 'MONTH')
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@druid_table_n0
+POSTHOOK: Output: hdfs://### HDFS PATH ###
+1969-12-01
+PREHOOK: query: select TRUNC(cast(`__time` as timestamp), 'MM') from druid_table_n0 GROUP BY TRUNC(cast(`__time` as timestamp), 'MM')
+PREHOOK: type: QUERY
+PREHOOK: Input: default@druid_table_n0
+PREHOOK: Output: hdfs://### HDFS PATH ###
+POSTHOOK: query: select TRUNC(cast(`__time` as timestamp), 'MM') from druid_table_n0 GROUP BY TRUNC(cast(`__time` as timestamp), 'MM')
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@druid_table_n0
+POSTHOOK: Output: hdfs://### HDFS PATH ###
+1969-12-01
+PREHOOK: query: select TRUNC(cast(`__time` as timestamp), 'MON') from druid_table_n0 GROUP BY TRUNC(cast(`__time` as timestamp), 'MON')
+PREHOOK: type: QUERY
+PREHOOK: Input: default@druid_table_n0
+PREHOOK: Output: hdfs://### HDFS PATH ###
+POSTHOOK: query: select TRUNC(cast(`__time` as timestamp), 'MON') from druid_table_n0 GROUP BY TRUNC(cast(`__time` as timestamp), 'MON')
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@druid_table_n0
+POSTHOOK: Output: hdfs://### HDFS PATH ###
+1969-12-01
+PREHOOK: query: explain select TRUNC(cast(`__time` as timestamp), 'QUARTER') from druid_table_n0 GROUP BY TRUNC(cast(`__time` as timestamp), 'QUARTER')
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select TRUNC(cast(`__time` as timestamp), 'QUARTER') from druid_table_n0 GROUP BY TRUNC(cast(`__time` as timestamp), 'QUARTER')
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        TableScan
+          alias: druid_table_n0
+          properties:
+            druid.fieldNames vc
+            druid.fieldTypes string
+            druid.query.json {"queryType":"groupBy","dataSource":"default.druid_table_n0","granularity":"all","dimensions":[{"type":"default","dimension":"vc","outputName":"vc","outputType":"STRING"}],"virtualColumns":[{"type":"expression","name":"vc","expression":"timestamp_format(timestamp_floor(\"__time\",'P3M','','US/Pacific'),'yyyy-MM-dd','US/Pacific')","outputType":"STRING"}],"limitSpec":{"type":"default"},"aggregations":[],"intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"]}
+            druid.query.type groupBy
+          Select Operator
+            expressions: vc (type: string)
+            outputColumnNames: _col0
+            ListSink
+
+PREHOOK: query: select TRUNC(cast(`__time` as timestamp), 'QUARTER') from druid_table_n0 GROUP BY TRUNC(cast(`__time` as timestamp), 'QUARTER')
+PREHOOK: type: QUERY
+PREHOOK: Input: default@druid_table_n0
+PREHOOK: Output: hdfs://### HDFS PATH ###
+POSTHOOK: query: select TRUNC(cast(`__time` as timestamp), 'QUARTER') from druid_table_n0 GROUP BY TRUNC(cast(`__time` as timestamp), 'QUARTER')
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@druid_table_n0
+POSTHOOK: Output: hdfs://### HDFS PATH ###
+1969-10-01
+PREHOOK: query: select TRUNC(cast(`__time` as timestamp), 'Q') from druid_table_n0 GROUP BY TRUNC(cast(`__time` as timestamp), 'Q')
+PREHOOK: type: QUERY
+PREHOOK: Input: default@druid_table_n0
+PREHOOK: Output: hdfs://### HDFS PATH ###
+POSTHOOK: query: select TRUNC(cast(`__time` as timestamp), 'Q') from druid_table_n0 GROUP BY TRUNC(cast(`__time` as timestamp), 'Q')
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@druid_table_n0
+POSTHOOK: Output: hdfs://### HDFS PATH ###
+1969-10-01
+PREHOOK: query: explain select TO_DATE(`__time`) from druid_table_n0 GROUP BY TO_DATE(`__time`)
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select TO_DATE(`__time`) from druid_table_n0 GROUP BY TO_DATE(`__time`)
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        TableScan
+          alias: druid_table_n0
+          properties:
+            druid.fieldNames vc
+            druid.fieldTypes date
+            druid.query.json {"queryType":"groupBy","dataSource":"default.druid_table_n0","granularity":"all","dimensions":[{"type":"default","dimension":"vc","outputName":"vc","outputType":"LONG"}],"virtualColumns":[{"type":"expression","name":"vc","expression":"timestamp_floor(\"__time\",'P1D','','US/Pacific')","outputType":"LONG"}],"limitSpec":{"type":"default"},"aggregations":[],"intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"]}
+            druid.query.type groupBy
+          Select Operator
+            expressions: vc (type: date)
+            outputColumnNames: _col0
+            ListSink
+
+PREHOOK: query: select TO_DATE(`__time`) from druid_table_n0 GROUP BY TO_DATE(`__time`)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@druid_table_n0
+PREHOOK: Output: hdfs://### HDFS PATH ###
+POSTHOOK: query: select TO_DATE(`__time`) from druid_table_n0 GROUP BY TO_DATE(`__time`)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@druid_table_n0
+POSTHOOK: Output: hdfs://### HDFS PATH ###
+1969-12-31
+PREHOOK: query: EXPLAIN SELECT SUM((`druid_table_alias`.`cdouble` * `druid_table_alias`.`cdouble`)) AS `sum_calculation_4998925219892510720_ok`,
+  CAST(TRUNC(CAST(`druid_table_alias`.`__time` AS TIMESTAMP),'MM') AS DATE) AS `tmn___time_ok`
+FROM `default`.`druid_table_n0` `druid_table_alias`
+GROUP BY CAST(TRUNC(CAST(`druid_table_alias`.`__time` AS TIMESTAMP),'MM') AS DATE)
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN SELECT SUM((`druid_table_alias`.`cdouble` * `druid_table_alias`.`cdouble`)) AS `sum_calculation_4998925219892510720_ok`,
+  CAST(TRUNC(CAST(`druid_table_alias`.`__time` AS TIMESTAMP),'MM') AS DATE) AS `tmn___time_ok`
+FROM `default`.`druid_table_n0` `druid_table_alias`
+GROUP BY CAST(TRUNC(CAST(`druid_table_alias`.`__time` AS TIMESTAMP),'MM') AS DATE)
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        TableScan
+          alias: druid_table_alias
+          properties:
+            druid.fieldNames vc,$f1
+            druid.fieldTypes date,double
+            druid.query.json {"queryType":"groupBy","dataSource":"default.druid_table_n0","granularity":"all","dimensions":[{"type":"default","dimension":"vc","outputName":"vc","outputType":"LONG"}],"virtualColumns":[{"type":"expression","name":"vc","expression":"timestamp_floor(timestamp_parse(timestamp_format(timestamp_floor(\"__time\",'P1M','','US/Pacific'),'yyyy-MM-dd','US/Pacific'),'','US/Pacific'),'P1D','','US/Pacific')","outputType":"LONG"}],"limitSpec":{"type":"default"},"aggregations":[{"type":"doubleSum","name":"$f1","expression":"(\"cdouble\" * \"cdouble\")"}],"intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"]}
+            druid.query.type groupBy
+          Select Operator
+            expressions: $f1 (type: double), vc (type: date)
+            outputColumnNames: _col0, _col1
+            ListSink
+
+PREHOOK: query: SELECT SUM((`druid_table_alias`.`cdouble` * `druid_table_alias`.`cdouble`)) AS `sum_calculation_4998925219892510720_ok`,
+  CAST(TRUNC(CAST(`druid_table_alias`.`__time` AS TIMESTAMP),'MM') AS DATE) AS `tmn___time_ok`
+FROM `default`.`druid_table_n0` `druid_table_alias`
+GROUP BY CAST(TRUNC(CAST(`druid_table_alias`.`__time` AS TIMESTAMP),'MM') AS DATE)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@druid_table_n0
+PREHOOK: Output: hdfs://### HDFS PATH ###
+POSTHOOK: query: SELECT SUM((`druid_table_alias`.`cdouble` * `druid_table_alias`.`cdouble`)) AS `sum_calculation_4998925219892510720_ok`,
+  CAST(TRUNC(CAST(`druid_table_alias`.`__time` AS TIMESTAMP),'MM') AS DATE) AS `tmn___time_ok`
+FROM `default`.`druid_table_n0` `druid_table_alias`
+GROUP BY CAST(TRUNC(CAST(`druid_table_alias`.`__time` AS TIMESTAMP),'MM') AS DATE)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@druid_table_n0
+POSTHOOK: Output: hdfs://### HDFS PATH ###
+3.832948323764436E14	1969-12-01
+PREHOOK: query: explain SELECT DATE_ADD(cast(`__time` as date), CAST((cdouble / 1000) AS INT)) as date_1,  DATE_SUB(cast(`__time` as date), CAST((cdouble / 1000) AS INT)) as date_2 from druid_table_n0  order by date_1, date_2 limit 3
+PREHOOK: type: QUERY
+POSTHOOK: query: explain SELECT DATE_ADD(cast(`__time` as date), CAST((cdouble / 1000) AS INT)) as date_1,  DATE_SUB(cast(`__time` as date), CAST((cdouble / 1000) AS INT)) as date_2 from druid_table_n0  order by date_1, date_2 limit 3
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: druid_table_n0
+                  properties:
+                    druid.fieldNames vc,vc0
+                    druid.fieldTypes date,date
+                    druid.query.json {"queryType":"scan","dataSource":"default.druid_table_n0","intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"],"virtualColumns":[{"type":"expression","name":"vc","expression":"timestamp_shift(timestamp_floor(\"__time\",'P1D','','US/Pacific'),'P1D',CAST((\"cdouble\" / CAST(1000, 'DOUBLE')), 'LONG'),'US/Pacific')","outputType":"LONG"},{"type":"expression","name":"vc0","expression":"timestamp_shift(timestamp_floor(\"__time\",'P1D','','US/Pacific'),'P1D',-( CAST((\"cdouble\" / CAST(1000, 'DOUBLE')), 'LONG') ),'US/Pacific')","outputType":"LONG"}],"columns":["vc","vc0"],"resultFormat":"compactedList"}
+                    druid.query.type scan
+                  Statistics: Num rows: 9173 Data size: 976192 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: vc (type: date), vc0 (type: date)
+                    outputColumnNames: _col0, _col1
+                    Statistics: Num rows: 9173 Data size: 976192 Basic stats: COMPLETE Column stats: NONE
+                    Reduce Output Operator
+                      key expressions: _col0 (type: date), _col1 (type: date)
+                      sort order: ++
+                      Statistics: Num rows: 9173 Data size: 976192 Basic stats: COMPLETE Column stats: NONE
+                      TopN Hash Memory Usage: 0.1
+        Reducer 2 
+            Reduce Operator Tree:
+              Select Operator
+                expressions: KEY.reducesinkkey0 (type: date), KEY.reducesinkkey1 (type: date)
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 9173 Data size: 976192 Basic stats: COMPLETE Column stats: NONE
+                Limit
+                  Number of rows: 3
+                  Statistics: Num rows: 3 Data size: 318 Basic stats: COMPLETE Column stats: NONE
+                  File Output Operator
+                    compressed: false
+                    Statistics: Num rows: 3 Data size: 318 Basic stats: COMPLETE Column stats: NONE
+                    table:
+                        input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: 3
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: SELECT DATE_ADD(cast(`__time` as date), CAST((cdouble / 1000) AS INT)) as date_1,  DATE_SUB(cast(`__time` as date), CAST((cdouble / 1000) AS INT)) as date_2 from druid_table_n0  order by date_1, date_2 limit 3
+PREHOOK: type: QUERY
+PREHOOK: Input: default@druid_table_n0
+PREHOOK: Output: hdfs://### HDFS PATH ###
+POSTHOOK: query: SELECT DATE_ADD(cast(`__time` as date), CAST((cdouble / 1000) AS INT)) as date_1,  DATE_SUB(cast(`__time` as date), CAST((cdouble / 1000) AS INT)) as date_2 from druid_table_n0  order by date_1, date_2 limit 3
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@druid_table_n0
+POSTHOOK: Output: hdfs://### HDFS PATH ###
+1969-02-26	1970-11-04
+1969-03-19	1970-10-14
+1969-11-13	1970-02-17
 PREHOOK: query: DROP TABLE druid_table_n0
 PREHOOK: type: DROPTABLE
 PREHOOK: Input: default@druid_table_n0

http://git-wip-us.apache.org/repos/asf/hive/blob/2493e4a4/ql/src/test/results/clientpositive/infer_join_preds.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/infer_join_preds.q.out b/ql/src/test/results/clientpositive/infer_join_preds.q.out
index ac6f565..d8163aa 100644
--- a/ql/src/test/results/clientpositive/infer_join_preds.q.out
+++ b/ql/src/test/results/clientpositive/infer_join_preds.q.out
@@ -1124,48 +1124,60 @@ STAGE PLANS:
             Filter Operator
               predicate: prid is not null (type: boolean)
               Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
-              Reduce Output Operator
-                key expressions: prid (type: bigint)
-                sort order: +
-                Map-reduce partition columns: prid (type: bigint)
+              Select Operator
+                expressions: idp_warehouse_id (type: bigint), prid (type: bigint), prtimesheetid (type: bigint), prassignmentid (type: bigint), prchargecodeid (type: bigint), prtypecodeid (type: bigint), prsequence (type: bigint), prmodby (type: varchar(96)), prmodtime (type: timestamp), prrmexported (type: bigint), prrmckdel (type: bigint), slice_status (type: int), role_id (type: bigint), user_lov1 (type: varchar(30)), user_lov2 (type: varchar(30)), incident_id (type: bigint), incident_investment_id (type: bigint), odf_ss_actuals (type: bigint), practsum (type: decimal(38,20))
+                outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18
                 Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
-                value expressions: idp_warehouse_id (type: bigint), prtimesheetid (type: bigint), prassignmentid (type: bigint), prchargecodeid (type: bigint), prtypecodeid (type: bigint), prsequence (type: bigint), prmodby (type: varchar(96)), prmodtime (type: timestamp), prrmexported (type: bigint), prrmckdel (type: bigint), slice_status (type: int), role_id (type: bigint), user_lov1 (type: varchar(30)), user_lov2 (type: varchar(30)), incident_id (type: bigint), incident_investment_id (type: bigint), odf_ss_actuals (type: bigint), practsum (type: decimal(38,20))
+                Reduce Output Operator
+                  key expressions: _col1 (type: bigint)
+                  sort order: +
+                  Map-reduce partition columns: _col1 (type: bigint)
+                  Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+                  value expressions: _col0 (type: bigint), _col2 (type: bigint), _col3 (type: bigint), _col4 (type: bigint), _col5 (type: bigint), _col6 (type: bigint), _col7 (type: varchar(96)), _col8 (type: timestamp), _col9 (type: bigint), _col10 (type: bigint), _col11 (type: int), _col12 (type: bigint), _col13 (type: varchar(30)), _col14 (type: varchar(30)), _col15 (type: bigint), _col16 (type: bigint), _col17 (type: bigint), _col18 (type: decimal(38,20))
           TableScan
-            alias: d
+            alias: e
             Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
             Filter Operator
               predicate: prid is not null (type: boolean)
               Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
-              Reduce Output Operator
-                key expressions: prid (type: bigint)
-                sort order: +
-                Map-reduce partition columns: prid (type: bigint)
+              Select Operator
+                expressions: prid (type: bigint)
+                outputColumnNames: _col0
                 Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
-                value expressions: pruid (type: varchar(32)), prtimesheetid (type: bigint), prassignmentid (type: bigint), prchargecodeid (type: bigint), prtypecodeid (type: bigint), prsequence (type: bigint), prmodby (type: varchar(96)), prmodtime (type: timestamp), prrmexported (type: bigint), prrmckdel (type: bigint), slice_status (type: int), role_id (type: bigint), user_lov1 (type: varchar(30)), user_lov2 (type: varchar(30)), incident_id (type: bigint), incident_investment_id (type: bigint), odf_ss_actuals (type: bigint), practsum (type: decimal(38,20))
+                Reduce Output Operator
+                  key expressions: _col0 (type: bigint)
+                  sort order: +
+                  Map-reduce partition columns: _col0 (type: bigint)
+                  Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
           TableScan
-            alias: e
+            alias: d
             Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
             Filter Operator
               predicate: prid is not null (type: boolean)
               Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
-              Reduce Output Operator
-                key expressions: prid (type: bigint)
-                sort order: +
-                Map-reduce partition columns: prid (type: bigint)
+              Select Operator
+                expressions: pruid (type: varchar(32)), prid (type: bigint), prtimesheetid (type: bigint), prassignmentid (type: bigint), prchargecodeid (type: bigint), prtypecodeid (type: bigint), prsequence (type: bigint), prmodby (type: varchar(96)), prmodtime (type: timestamp), prrmexported (type: bigint), prrmckdel (type: bigint), slice_status (type: int), role_id (type: bigint), user_lov1 (type: varchar(30)), user_lov2 (type: varchar(30)), incident_id (type: bigint), incident_investment_id (type: bigint), odf_ss_actuals (type: bigint), practsum (type: decimal(38,20))
+                outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18
                 Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+                Reduce Output Operator
+                  key expressions: _col1 (type: bigint)
+                  sort order: +
+                  Map-reduce partition columns: _col1 (type: bigint)
+                  Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+                  value expressions: _col0 (type: varchar(32)), _col2 (type: bigint), _col3 (type: bigint), _col4 (type: bigint), _col5 (type: bigint), _col6 (type: bigint), _col7 (type: varchar(96)), _col8 (type: timestamp), _col9 (type: bigint), _col10 (type: bigint), _col11 (type: int), _col12 (type: bigint), _col13 (type: varchar(30)), _col14 (type: varchar(30)), _col15 (type: bigint), _col16 (type: bigint), _col17 (type: bigint), _col18 (type: decimal(38,20))
       Reduce Operator Tree:
         Join Operator
           condition map:
                Inner Join 0 to 1
                Inner Join 0 to 2
           keys:
-            0 prid (type: bigint)
-            1 prid (type: bigint)
-            2 prid (type: bigint)
-          outputColumnNames: _col0, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col21, _col22, _col23, _col32, _col33, _col34, _col35, _col36, _col37, _col38, _col39, _col40, _col41, _col42, _col43, _col44, _col45, _col46, _col47, _col48, _col49, _col50
+            0 _col1 (type: bigint)
+            1 _col0 (type: bigint)
+            2 _col1 (type: bigint)
+          outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col20, _col21, _col22, _col23, _col24, _col25, _col26, _col27, _col28, _col29, _col30, _col31, _col32, _col33, _col34, _col35, _col36, _col37, _col38
           Statistics: Num rows: 2 Data size: 0 Basic stats: PARTIAL Column stats: NONE
           Filter Operator
-            predicate: (concat(CASE WHEN (_col6 is null) THEN (1) ELSE (_col6) END, ',', CASE WHEN (_col7 is null) THEN (1) ELSE (_col7) END, ',', CASE WHEN (_col8 is null) THEN (1) ELSE (_col8) END, ',', CASE WHEN (_col9 is null) THEN (1) ELSE (_col9) END, ',', CASE WHEN (_col10 is null) THEN ('') ELSE (_col10) END, ',', CASE WHEN (_col23 is null) THEN (1) ELSE (_col23) END, ',', CASE WHEN (_col11 is null) THEN (1) ELSE (_col11) END, ',', CASE WHEN (length(_col12) is null) THEN ('') ELSE (_col12) END, ',', CASE WHEN (_col13 is null) THEN (TIMESTAMP'2017-12-08 00:00:00.0') ELSE (_col13) END, ',', CASE WHEN (_col14 is null) THEN (1) ELSE (_col14) END, ',', CASE WHEN (_col15 is null) THEN (1) ELSE (_col15) END, ',', CASE WHEN (_col16 is null) THEN (1) ELSE (_col16) END, ',', CASE WHEN (_col17 is null) THEN (1) ELSE (_col17) END, ',', CASE WHEN (length(_col18) is null) THEN ('') ELSE (_col18) END, ',', CASE WHEN (length(_col19) is null) THEN ('') ELSE (_col19) END, ',', CASE WHEN (_col20 is null) THEN (1) ELSE (_col20) END, ',', CASE WHEN (_col21 is null) THEN (1) ELSE (_col21) END, ',', CASE WHEN (_col22 is null) THEN (1) ELSE (_col22) END) <> concat(CASE WHEN (length(_col32) is null) THEN ('') ELSE (_col32) END, ',', CASE WHEN (_col33 is null) THEN (1) ELSE (_col33) END, ',', CASE WHEN (_col34 is null) THEN (1) ELSE (_col34) END, ',', CASE WHEN (_col35 is null) THEN (1) ELSE (_col35) END, ',', CASE WHEN (_col36 is null) THEN (1) ELSE (_col36) END, ',', CASE WHEN (_col37 is null) THEN ('') ELSE (_col37) END, ',', CASE WHEN (_col50 is null) THEN (1) ELSE (_col50) END, ',', CASE WHEN (_col38 is null) THEN (1) ELSE (_col38) END, ',', CASE WHEN (length(_col39) is null) THEN ('') ELSE (_col39) END, ',', CASE WHEN (_col40 is null) THEN (TIMESTAMP'2017-12-08 00:00:00.0') ELSE (_col40) END, ',', CASE WHEN (_col41 is null) THEN (1) ELSE (_col41) END, ',', CASE WHEN (_col42 is null) THEN (1) ELSE (_col42) END, ',', CASE WHEN (_col43 is null) THEN (1) ELSE (_col43) END, ',', CASE WHEN (_col44 is null) THEN (1) ELSE (_col44) END, ',', CASE WHEN (length(_col45) is null) THEN ('') ELSE (_col45) END, ',', CASE WHEN (length(_col46) is null) THEN ('') ELSE (_col46) END, ',', CASE WHEN (_col47 is null) THEN (1) ELSE (_col47) END, ',', CASE WHEN (_col48 is null) THEN (1) ELSE (_col48) END, ',', CASE WHEN (_col49 is null) THEN (1) ELSE (_col49) END)) (type: boolean)
+            predicate: (concat(CASE WHEN (_col1 is null) THEN (1) ELSE (_col1) END, ',', CASE WHEN (_col2 is null) THEN (1) ELSE (_col2) END, ',', CASE WHEN (_col3 is null) THEN (1) ELSE (_col3) END, ',', CASE WHEN (_col4 is null) THEN (1) ELSE (_col4) END, ',', CASE WHEN (_col5 is null) THEN ('') ELSE (_col5) END, ',', CASE WHEN (_col18 is null) THEN (1) ELSE (_col18) END, ',', CASE WHEN (_col6 is null) THEN (1) ELSE (_col6) END, ',', CASE WHEN (length(_col7) is null) THEN ('') ELSE (_col7) END, ',', CASE WHEN (_col8 is null) THEN (TIMESTAMP'2017-12-08 00:00:00.0') ELSE (_col8) END, ',', CASE WHEN (_col9 is null) THEN (1) ELSE (_col9) END, ',', CASE WHEN (_col10 is null) THEN (1) ELSE (_col10) END, ',', CASE WHEN (_col11 is null) THEN (1) ELSE (_col11) END, ',', CASE WHEN (_col12 is null) THEN (1) ELSE (_col12) END, ',', CASE WHEN (length(_col13) is null) THEN ('') ELSE (_col13) END, ',', CASE WHEN (length(_col14) is null) THEN ('') ELSE (_col14) END, ',', CASE WHEN (_col15 is null) THEN (1) ELSE (_col15) END, ',', CASE WHEN (_col16 is null) THEN (1) ELSE (_col16) END, ',', CASE WHEN (_col17 is null) THEN (1) ELSE (_col17) END) <> concat(CASE WHEN (length(_col20) is null) THEN ('') ELSE (_col20) END, ',', CASE WHEN (_col21 is null) THEN (1) ELSE (_col21) END, ',', CASE WHEN (_col22 is null) THEN (1) ELSE (_col22) END, ',', CASE WHEN (_col23 is null) THEN (1) ELSE (_col23) END, ',', CASE WHEN (_col24 is null) THEN (1) ELSE (_col24) END, ',', CASE WHEN (_col25 is null) THEN ('') ELSE (_col25) END, ',', CASE WHEN (_col38 is null) THEN (1) ELSE (_col38) END, ',', CASE WHEN (_col26 is null) THEN (1) ELSE (_col26) END, ',', CASE WHEN (length(_col27) is null) THEN ('') ELSE (_col27) END, ',', CASE WHEN (_col28 is null) THEN (TIMESTAMP'2017-12-08 00:00:00.0') ELSE (_col28) END, ',', CASE WHEN (_col29 is null) THEN (1) ELSE (_col29) END, ',', CASE WHEN (_col30 is null) THEN (1) ELSE (_col30) END, ',', CASE WHEN (_col31 is null) THEN (1) ELSE (_col31) END, ',', CASE WHEN (_col32 is null) THEN (1) ELSE (_col32) END, ',', CASE WHEN (length(_col33) is null) THEN ('') ELSE (_col33) END, ',', CASE WHEN (length(_col34) is null) THEN ('') ELSE (_col34) END, ',', CASE WHEN (_col35 is null) THEN (1) ELSE (_col35) END, ',', CASE WHEN (_col36 is null) THEN (1) ELSE (_col36) END, ',', CASE WHEN (_col37 is null) THEN (1) ELSE (_col37) END)) (type: boolean)
             Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
             Select Operator
               expressions: _col0 (type: bigint)