Posted to commits@hive.apache.org by jc...@apache.org on 2018/07/28 15:18:56 UTC
[08/10] hive git commit: HIVE-18729: Druid Time column type (Jesus Camacho Rodriguez, reviewed by Ashutosh Chauhan)
HIVE-18729: Druid Time column type (Jesus Camacho Rodriguez, reviewed by Ashutosh Chauhan)
Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/8e73f4be
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/8e73f4be
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/8e73f4be
Branch: refs/heads/master
Commit: 8e73f4be2f5cabae3d056ce592a2ffbfb8de196f
Parents: 47dd953
Author: Jesus Camacho Rodriguez <jc...@apache.org>
Authored: Mon Jul 23 22:10:37 2018 -0700
Committer: Jesus Camacho Rodriguez <jc...@apache.org>
Committed: Sat Jul 28 08:17:40 2018 -0700
----------------------------------------------------------------------
.../hive/druid/DruidStorageHandlerUtils.java | 4 -
.../hadoop/hive/druid/serde/DruidSerDe.java | 12 +-
.../hive/druid/serde/DruidSerDeUtils.java | 3 -
.../serde/HiveDruidSerializationModule.java | 40 ---
.../serde/PeriodGranularitySerializer.java | 54 ----
.../test/resources/testconfiguration.properties | 1 +
...tedDynPartitionTimeGranularityOptimizer.java | 33 ++-
.../calcite/translator/ExprNodeConverter.java | 3 +-
.../ql/parse/DruidSqlOperatorConverter.java | 43 +--
.../queries/clientpositive/druidmini_test_ts.q | 64 +++++
.../druid/druid_timestamptz.q.out | 10 +-
.../druid/druidmini_expressions.q.out | 22 +-
.../druid/druidmini_test_ts.q.out | 263 +++++++++++++++++++
.../hadoop/hive/serde2/io/DateWritable.java | 1 -
14 files changed, 399 insertions(+), 154 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hive/blob/8e73f4be/druid-handler/src/java/org/apache/hadoop/hive/druid/DruidStorageHandlerUtils.java
----------------------------------------------------------------------
diff --git a/druid-handler/src/java/org/apache/hadoop/hive/druid/DruidStorageHandlerUtils.java b/druid-handler/src/java/org/apache/hadoop/hive/druid/DruidStorageHandlerUtils.java
index 5a48d0f..3e2a171 100644
--- a/druid-handler/src/java/org/apache/hadoop/hive/druid/DruidStorageHandlerUtils.java
+++ b/druid-handler/src/java/org/apache/hadoop/hive/druid/DruidStorageHandlerUtils.java
@@ -90,7 +90,6 @@ import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.conf.Constants;
import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.druid.serde.HiveDruidSerializationModule;
import org.apache.hadoop.hive.ql.exec.Utilities;
import org.apache.hadoop.hive.serde2.objectinspector.PrimitiveObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorUtils;
@@ -195,9 +194,6 @@ public final class DruidStorageHandlerUtils {
JSON_MAPPER.setInjectableValues(injectableValues);
SMILE_MAPPER.setInjectableValues(injectableValues);
- HiveDruidSerializationModule hiveDruidSerializationModule = new HiveDruidSerializationModule();
- JSON_MAPPER.registerModule(hiveDruidSerializationModule);
- SMILE_MAPPER.registerModule(hiveDruidSerializationModule);
// Register the shard sub type to be used by the mapper
JSON_MAPPER.registerSubtypes(new NamedType(LinearShardSpec.class, "linear"));
JSON_MAPPER.registerSubtypes(new NamedType(NumberedShardSpec.class, "numbered"));
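With these registrations gone, JSON_MAPPER and SMILE_MAPPER fall back to Druid's stock serialization of PeriodGranularity: the user-time-zone override that HiveDruidSerializationModule injected (see the deleted class below) no longer applies at the mapper level, and the time zone is instead passed explicitly where the Druid expressions are generated (see DruidSqlOperatorConverter further down).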
http://git-wip-us.apache.org/repos/asf/hive/blob/8e73f4be/druid-handler/src/java/org/apache/hadoop/hive/druid/serde/DruidSerDe.java
----------------------------------------------------------------------
diff --git a/druid-handler/src/java/org/apache/hadoop/hive/druid/serde/DruidSerDe.java b/druid-handler/src/java/org/apache/hadoop/hive/druid/serde/DruidSerDe.java
index df9049e..015924d 100644
--- a/druid-handler/src/java/org/apache/hadoop/hive/druid/serde/DruidSerDe.java
+++ b/druid-handler/src/java/org/apache/hadoop/hive/druid/serde/DruidSerDe.java
@@ -99,7 +99,6 @@ import org.apache.hadoop.util.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import static org.apache.hadoop.hive.druid.serde.DruidSerDeUtils.TIMESTAMP_FORMAT;
import static org.joda.time.format.ISODateTimeFormat.dateOptionalTimeParser;
/**
@@ -395,14 +394,15 @@ import static org.joda.time.format.ISODateTimeFormat.dateOptionalTimeParser;
}
switch (types[i].getPrimitiveCategory()) {
case TIMESTAMP:
+ final TimestampWritableV2 timestampWritable;
if (value instanceof Number) {
- output.add(new TimestampWritableV2(Timestamp.valueOf(
- ZonedDateTime.ofInstant(Instant.ofEpochMilli(((Number) value).longValue()), tsTZTypeInfo.timeZone())
- .format(DateTimeFormatter.ofPattern(TIMESTAMP_FORMAT)))));
+ timestampWritable = new TimestampWritableV2(
+ Timestamp.ofEpochMilli(((Number) value).longValue()));
} else {
- output.add(new TimestampWritableV2(Timestamp.valueOf((String) value)));
+ timestampWritable = new TimestampWritableV2(
+ Timestamp.valueOf((String) value));
}
-
+ output.add(timestampWritable);
break;
case TIMESTAMPLOCALTZ:
final long numberOfMillis;
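The practical difference between the removed and added TIMESTAMP branches: the old code printed the epoch value as a wall-clock string in the table's time zone and re-parsed that string, while the new Timestamp.ofEpochMilli call takes the millisecond value as the instant it already is. A minimal sketch of the two readings, using plain java.time rather than the Hive classes above (the 7/8-hour US/Pacific shifts in the golden files below are this difference made visible):

    import java.time.Instant;
    import java.time.ZoneId;
    import java.time.ZonedDateTime;
    import java.time.format.DateTimeFormatter;

    public class EpochReadingSketch {
      public static void main(String[] args) {
        long epochMillis = 1451852794000L;                    // 2016-01-03T20:26:34Z
        // New behavior: the value is an instant; no zone round trip.
        Instant instant = Instant.ofEpochMilli(epochMillis);
        // Old behavior: format in a zone, then keep the wall-clock string.
        String wallClock = ZonedDateTime.ofInstant(instant, ZoneId.of("US/Pacific"))
            .format(DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm:ss"));
        System.out.println(instant);   // 2016-01-03T20:26:34Z
        System.out.println(wallClock); // 2016-01-03 12:26:34 -- shifted by the zone offset
      }
    }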
http://git-wip-us.apache.org/repos/asf/hive/blob/8e73f4be/druid-handler/src/java/org/apache/hadoop/hive/druid/serde/DruidSerDeUtils.java
----------------------------------------------------------------------
diff --git a/druid-handler/src/java/org/apache/hadoop/hive/druid/serde/DruidSerDeUtils.java b/druid-handler/src/java/org/apache/hadoop/hive/druid/serde/DruidSerDeUtils.java
index 630e097..c04f2dc 100644
--- a/druid-handler/src/java/org/apache/hadoop/hive/druid/serde/DruidSerDeUtils.java
+++ b/druid-handler/src/java/org/apache/hadoop/hive/druid/serde/DruidSerDeUtils.java
@@ -29,9 +29,6 @@ public final class DruidSerDeUtils {
private static final Logger LOG = LoggerFactory.getLogger(DruidSerDeUtils.class);
- protected static final String ISO_TIME_FORMAT = "yyyy-MM-dd'T'HH:mm:ss.SSS'Z'";
- protected static final String TIMESTAMP_FORMAT = "yyyy-MM-dd HH:mm:ss";
-
protected static final String FLOAT_TYPE = "FLOAT";
protected static final String DOUBLE_TYPE = "DOUBLE";
protected static final String LONG_TYPE = "LONG";
http://git-wip-us.apache.org/repos/asf/hive/blob/8e73f4be/druid-handler/src/java/org/apache/hadoop/hive/druid/serde/HiveDruidSerializationModule.java
----------------------------------------------------------------------
diff --git a/druid-handler/src/java/org/apache/hadoop/hive/druid/serde/HiveDruidSerializationModule.java b/druid-handler/src/java/org/apache/hadoop/hive/druid/serde/HiveDruidSerializationModule.java
deleted file mode 100644
index 8a110ae..0000000
--- a/druid-handler/src/java/org/apache/hadoop/hive/druid/serde/HiveDruidSerializationModule.java
+++ /dev/null
@@ -1,40 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hive.druid.serde;
-
-import io.druid.java.util.common.granularity.PeriodGranularity;
-import io.druid.query.spec.LegacySegmentSpec;
-
-import com.fasterxml.jackson.core.util.VersionUtil;
-import com.fasterxml.jackson.databind.module.SimpleModule;
-
-import org.joda.time.Interval;
-
-/**
- * This class is used to define/override any serde behavior for classes from druid.
- * Currently it is used to override the default behavior when serializing PeriodGranularity to include user timezone.
- */
-public class HiveDruidSerializationModule extends SimpleModule {
- private static final String NAME = "HiveDruidSerializationModule";
- private static final VersionUtil VERSION_UTIL = new VersionUtil() {};
-
- public HiveDruidSerializationModule() {
- super(NAME, VERSION_UTIL.version());
- addSerializer(PeriodGranularity.class, new PeriodGranularitySerializer());
- }
-}
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/hive/blob/8e73f4be/druid-handler/src/java/org/apache/hadoop/hive/druid/serde/PeriodGranularitySerializer.java
----------------------------------------------------------------------
diff --git a/druid-handler/src/java/org/apache/hadoop/hive/druid/serde/PeriodGranularitySerializer.java b/druid-handler/src/java/org/apache/hadoop/hive/druid/serde/PeriodGranularitySerializer.java
deleted file mode 100644
index 10f9172..0000000
--- a/druid-handler/src/java/org/apache/hadoop/hive/druid/serde/PeriodGranularitySerializer.java
+++ /dev/null
@@ -1,54 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hive.druid.serde;
-
-import io.druid.java.util.common.granularity.PeriodGranularity;
-
-import com.fasterxml.jackson.core.JsonGenerator;
-import com.fasterxml.jackson.core.JsonProcessingException;
-import com.fasterxml.jackson.databind.JsonSerializer;
-import com.fasterxml.jackson.databind.SerializerProvider;
-import com.fasterxml.jackson.databind.jsontype.TypeSerializer;
-
-import org.joda.time.DateTimeZone;
-
-import java.io.IOException;
-
-public class PeriodGranularitySerializer extends JsonSerializer<PeriodGranularity> {
-
- @Override
- public void serialize(PeriodGranularity granularity, JsonGenerator jsonGenerator,
- SerializerProvider serializerProvider) throws IOException, JsonProcessingException {
- // Set timezone based on user timezone if origin is not already set
- // as it is default Hive time semantics to consider user timezone.
- PeriodGranularity granularityWithUserTimezone = new PeriodGranularity(
- granularity.getPeriod(),
- granularity.getOrigin(),
- DateTimeZone.getDefault()
- );
- granularityWithUserTimezone.serialize(jsonGenerator, serializerProvider);
- }
-
- @Override
- public void serializeWithType(PeriodGranularity value, JsonGenerator gen,
- SerializerProvider serializers, TypeSerializer typeSer) throws IOException {
- serialize(value, gen, serializers);
- }
-}
-
-
http://git-wip-us.apache.org/repos/asf/hive/blob/8e73f4be/itests/src/test/resources/testconfiguration.properties
----------------------------------------------------------------------
diff --git a/itests/src/test/resources/testconfiguration.properties b/itests/src/test/resources/testconfiguration.properties
index 50bfe6a..125ad19 100644
--- a/itests/src/test/resources/testconfiguration.properties
+++ b/itests/src/test/resources/testconfiguration.properties
@@ -1699,6 +1699,7 @@ spark.perf.disabled.query.files=query14.q,\
query64.q
druid.query.files=druidmini_test1.q,\
+ druidmini_test_ts.q,\
druid_basic2.q,\
druidmini_joins.q,\
druidmini_test_insert.q,\
http://git-wip-us.apache.org/repos/asf/hive/blob/8e73f4be/ql/src/java/org/apache/hadoop/hive/ql/optimizer/SortedDynPartitionTimeGranularityOptimizer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/SortedDynPartitionTimeGranularityOptimizer.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/SortedDynPartitionTimeGranularityOptimizer.java
index 0ce359f..4297537 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/SortedDynPartitionTimeGranularityOptimizer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/SortedDynPartitionTimeGranularityOptimizer.java
@@ -267,6 +267,7 @@ public class SortedDynPartitionTimeGranularityOptimizer extends Transform {
Lists.newArrayList(fsParent.getSchema().getSignature());
final ArrayList<ExprNodeDesc> descs = Lists.newArrayList();
final List<String> colNames = Lists.newArrayList();
+ PrimitiveCategory timestampType = null;
int timestampPos = -1;
for (int i = 0; i < parentCols.size(); i++) {
ColumnInfo ci = parentCols.get(i);
@@ -274,11 +275,13 @@ public class SortedDynPartitionTimeGranularityOptimizer extends Transform {
descs.add(columnDesc);
colNames.add(columnDesc.getExprString());
if (columnDesc.getTypeInfo().getCategory() == ObjectInspector.Category.PRIMITIVE
- && ((PrimitiveTypeInfo) columnDesc.getTypeInfo()).getPrimitiveCategory() == PrimitiveCategory.TIMESTAMPLOCALTZ) {
+ && (((PrimitiveTypeInfo) columnDesc.getTypeInfo()).getPrimitiveCategory() == PrimitiveCategory.TIMESTAMP ||
+ ((PrimitiveTypeInfo) columnDesc.getTypeInfo()).getPrimitiveCategory() == PrimitiveCategory.TIMESTAMPLOCALTZ)) {
if (timestampPos != -1) {
- throw new SemanticException("Multiple columns with timestamp with local time-zone type on query result; "
- + "could not resolve which one is the timestamp with local time-zone column");
+ throw new SemanticException("Multiple columns with timestamp/timestamp with local time-zone type on query result; "
+ + "could not resolve which one is the right column");
}
+ timestampType = ((PrimitiveTypeInfo) columnDesc.getTypeInfo()).getPrimitiveCategory();
timestampPos = i;
}
}
@@ -327,8 +330,8 @@ public class SortedDynPartitionTimeGranularityOptimizer extends Transform {
}
- // Timestamp column type in Druid is timestamp with local time-zone, as it represents
- // a specific instant in time. Thus, we have this value and we need to extract the
+ // Timestamp column type in Druid is either timestamp or timestamp with local time-zone, i.e.,
+ // a specific instant in time. Thus, for the latter, we have this value and we need to extract the
// granularity to split the data when we are storing it in Druid. However, Druid stores
// the data in UTC. Thus, we need to apply the following logic on the data to extract
// the granularity correctly:
@@ -341,18 +344,20 @@ public class SortedDynPartitionTimeGranularityOptimizer extends Transform {
// #1 - Read the column value
ExprNodeDesc expr = new ExprNodeColumnDesc(parentCols.get(timestampPos));
- // #2 - UTC epoch for instant
- ExprNodeGenericFuncDesc f1 = new ExprNodeGenericFuncDesc(
- TypeInfoFactory.longTypeInfo, new GenericUDFEpochMilli(), Lists.newArrayList(expr));
- // #3 - Cast to timestamp
- ExprNodeGenericFuncDesc f2 = new ExprNodeGenericFuncDesc(
- TypeInfoFactory.timestampTypeInfo, new GenericUDFTimestamp(), Lists.newArrayList(f1));
+ if (timestampType == PrimitiveCategory.TIMESTAMPLOCALTZ) {
+ // #2 - UTC epoch for instant
+ expr = new ExprNodeGenericFuncDesc(
+ TypeInfoFactory.longTypeInfo, new GenericUDFEpochMilli(), Lists.newArrayList(expr));
+ // #3 - Cast to timestamp
+ expr = new ExprNodeGenericFuncDesc(
+ TypeInfoFactory.timestampTypeInfo, new GenericUDFTimestamp(), Lists.newArrayList(expr));
+ }
// #4 - We apply the granularity function
- ExprNodeGenericFuncDesc f3 = new ExprNodeGenericFuncDesc(
+ expr = new ExprNodeGenericFuncDesc(
TypeInfoFactory.timestampTypeInfo,
new GenericUDFBridge(udfName, false, udfClass.getName()),
- Lists.newArrayList(f2));
- descs.add(f3);
+ Lists.newArrayList(expr));
+ descs.add(expr);
colNames.add(Constants.DRUID_TIMESTAMP_GRANULARITY_COL_NAME);
// Add granularity to the row schema
final ColumnInfo ci = new ColumnInfo(Constants.DRUID_TIMESTAMP_GRANULARITY_COL_NAME, TypeInfoFactory.timestampTypeInfo,
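The four numbered steps in the comment reduce, for the TIMESTAMPLOCALTZ case, to "move the instant to its UTC wall clock, then floor". A self-contained java.time sketch of that logic (JDK types only; hourBucketUtc is a hypothetical stand-in for the epoch-milli/cast/granularity ExprNode chain built above):

    import java.time.Instant;
    import java.time.LocalDateTime;
    import java.time.ZoneOffset;
    import java.time.temporal.ChronoUnit;

    public class UtcGranularitySketch {
      // Hypothetical helper mirroring steps #2-#4 of the comment: floor an
      // instant to its HOUR bucket in UTC, independent of the session zone.
      static LocalDateTime hourBucketUtc(Instant instant) {
        return LocalDateTime.ofInstant(instant, ZoneOffset.UTC)
            .truncatedTo(ChronoUnit.HOURS);
      }

      public static void main(String[] args) {
        // 2016-01-03T20:26:34-08:00 (US/Pacific) == 2016-01-04T04:26:34Z
        Instant i = Instant.parse("2016-01-04T04:26:34Z");
        System.out.println(hourBucketUtc(i)); // 2016-01-04T04:00 -- bucketed in UTC, not local time
      }
    }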
http://git-wip-us.apache.org/repos/asf/hive/blob/8e73f4be/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/ExprNodeConverter.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/ExprNodeConverter.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/ExprNodeConverter.java
index d950991..6dd0018 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/ExprNodeConverter.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/ExprNodeConverter.java
@@ -48,6 +48,7 @@ import org.apache.hadoop.hive.common.type.HiveDecimal;
import org.apache.hadoop.hive.common.type.HiveIntervalDayTime;
import org.apache.hadoop.hive.common.type.HiveIntervalYearMonth;
import org.apache.hadoop.hive.common.type.Timestamp;
+import org.apache.hadoop.hive.common.type.TimestampTZUtil;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.ql.exec.UDFArgumentException;
import org.apache.hadoop.hive.ql.metadata.Hive;
@@ -323,7 +324,7 @@ public class ExprNodeConverter extends RexVisitorImpl<ExprNodeDesc> {
// Calcite stores timestamp with local time-zone in UTC internally, thus
// when we bring it back, we need to add the UTC suffix.
return new ExprNodeConstantDesc(TypeInfoFactory.getTimestampTZTypeInfo(conf.getLocalTimeZone()),
- literal.getValueAs(TimestampString.class).toString() + " UTC");
+ TimestampTZUtil.parse(literal.getValueAs(TimestampString.class).toString() + " UTC"));
case BINARY:
return new ExprNodeConstantDesc(TypeInfoFactory.binaryTypeInfo, literal.getValue3());
case DECIMAL:
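With this change the constant carries the parsed timestamp-with-local-time-zone value itself rather than its string rendering: TimestampTZUtil.parse consumes the Calcite value plus the appended " UTC" suffix (e.g. "2016-01-03 20:26:34 UTC", format assumed from TimestampString.toString()), so the ExprNodeConstantDesc holds a typed object and downstream consumers no longer re-parse the literal.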
http://git-wip-us.apache.org/repos/asf/hive/blob/8e73f4be/ql/src/java/org/apache/hadoop/hive/ql/parse/DruidSqlOperatorConverter.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/DruidSqlOperatorConverter.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/DruidSqlOperatorConverter.java
index 6aa98c0..ece6e77 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/DruidSqlOperatorConverter.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/DruidSqlOperatorConverter.java
@@ -38,6 +38,7 @@ import org.apache.calcite.sql.SqlFunction;
import org.apache.calcite.sql.SqlKind;
import org.apache.calcite.sql.SqlOperator;
import org.apache.calcite.sql.fun.SqlStdOperatorTable;
+import org.apache.calcite.sql.type.SqlTypeName;
import org.apache.calcite.sql.type.SqlTypeUtil;
import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveConcat;
import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveDateAddSqlOperator;
@@ -178,10 +179,11 @@ public class DruidSqlOperatorConverter {
return null;
}
if (SqlTypeUtil.isDatetime(call.getOperands().get(0).getType())) {
+ final TimeZone tz = timezoneId(query, call.getOperands().get(0));
return applyTimestampFormat(
- DruidExpressions.applyTimestampFloor(arg, Period.days(1).toString(), "", timezoneId(query)), YYYY_MM_DD,
- timezoneId(query)
- );
+ DruidExpressions.applyTimestampFloor(arg, Period.days(1).toString(), "", tz),
+ YYYY_MM_DD,
+ tz);
}
return null;
} else if (call.getOperands().size() == 2) {
@@ -207,9 +209,11 @@ public class DruidSqlOperatorConverter {
//bail out can not infer unit
return null;
}
- return applyTimestampFormat(DruidExpressions.applyTimestampFloor(arg, unit, "", timezoneId(query)), YYYY_MM_DD,
- timezoneId(query)
- );
+ final TimeZone tz = timezoneId(query, call.getOperands().get(0));
+ return applyTimestampFormat(
+ DruidExpressions.applyTimestampFloor(arg, unit, "", tz),
+ YYYY_MM_DD,
+ tz);
}
return null;
}
@@ -235,7 +239,11 @@ public class DruidSqlOperatorConverter {
if (arg == null) {
return null;
}
- return DruidExpressions.applyTimestampFloor(arg, Period.days(1).toString(), "", timezoneId(query));
+ return DruidExpressions.applyTimestampFloor(
+ arg,
+ Period.days(1).toString(),
+ "",
+ timezoneId(query, call.getOperands().get(0)));
}
}
@@ -288,7 +296,7 @@ public class DruidSqlOperatorConverter {
call.getOperands().size() == 1 ? DruidExpressions.stringLiteral(DEFAULT_TS_FORMAT) : DruidExpressions
.toDruidExpression(call.getOperands().get(1), rowType, query);
return DruidExpressions.functionCall("timestamp_format",
- ImmutableList.of(numMillis, format, DruidExpressions.stringLiteral(timezoneId(query).getID()))
+ ImmutableList.of(numMillis, format, DruidExpressions.stringLiteral(TimeZone.getTimeZone("UTC").getID()))
);
}
}
@@ -325,10 +333,13 @@ public class DruidSqlOperatorConverter {
}
final String steps = direction == -1 ? DruidQuery.format("-( %s )", arg1) : arg1;
- return DruidExpressions.functionCall("timestamp_shift", ImmutableList
- .of(arg0, DruidExpressions.stringLiteral("P1D"), steps,
- DruidExpressions.stringLiteral(timezoneId(query).getID())
- ));
+ return DruidExpressions.functionCall(
+ "timestamp_shift",
+ ImmutableList.of(
+ arg0,
+ DruidExpressions.stringLiteral("P1D"),
+ steps,
+ DruidExpressions.stringLiteral(timezoneId(query, call.getOperands().get(0)).getID())));
}
}
@@ -337,9 +348,11 @@ public class DruidSqlOperatorConverter {
* @param query Druid Rel
* @return time zone
*/
- private static TimeZone timezoneId(final DruidQuery query) {
- return TimeZone.getTimeZone(
- query.getTopNode().getCluster().getPlanner().getContext().unwrap(CalciteConnectionConfig.class).timeZone());
+ private static TimeZone timezoneId(final DruidQuery query, final RexNode arg) {
+ return arg.getType().getSqlTypeName() == SqlTypeName.TIMESTAMP_WITH_LOCAL_TIME_ZONE
+ ? TimeZone.getTimeZone(
+ query.getTopNode().getCluster().getPlanner().getContext().unwrap(CalciteConnectionConfig.class).timeZone()) :
+ TimeZone.getTimeZone("UTC");
}
private static String applyTimestampFormat(String arg, String format, TimeZone timeZone) {
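The two-argument timezoneId above is the rule the rest of this file's changes apply: only a TIMESTAMP WITH LOCAL TIME ZONE operand uses the session time zone; any other datetime operand is pinned to UTC. A standalone restatement of the rule (JDK TimeZone only; the enum is a stand-in for the Calcite SqlTypeName check):

    import java.util.TimeZone;

    public class TimezoneRuleSketch {
      enum SqlTimeType { TIMESTAMP, TIMESTAMP_WITH_LOCAL_TIME_ZONE }

      // Mirrors the rewritten timezoneId(query, arg): zone-sensitive math only
      // for the local-time-zone type; plain TIMESTAMP is wall-clock, hence UTC.
      static TimeZone zoneFor(SqlTimeType argType, TimeZone sessionZone) {
        return argType == SqlTimeType.TIMESTAMP_WITH_LOCAL_TIME_ZONE
            ? sessionZone
            : TimeZone.getTimeZone("UTC");
      }

      public static void main(String[] args) {
        TimeZone session = TimeZone.getTimeZone("US/Pacific");
        System.out.println(zoneFor(SqlTimeType.TIMESTAMP, session).getID());                      // UTC
        System.out.println(zoneFor(SqlTimeType.TIMESTAMP_WITH_LOCAL_TIME_ZONE, session).getID()); // US/Pacific
      }
    }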
http://git-wip-us.apache.org/repos/asf/hive/blob/8e73f4be/ql/src/test/queries/clientpositive/druidmini_test_ts.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/druidmini_test_ts.q b/ql/src/test/queries/clientpositive/druidmini_test_ts.q
new file mode 100644
index 0000000..9e45ae6
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/druidmini_test_ts.q
@@ -0,0 +1,64 @@
+--! qt:dataset:alltypesorc
+CREATE TABLE druid_table_test_ts
+STORED BY 'org.apache.hadoop.hive.druid.DruidStorageHandler'
+TBLPROPERTIES ("druid.segment.granularity" = "HOUR", "druid.query.granularity" = "MINUTE")
+AS
+SELECT `ctimestamp1` as `__time`,
+ cstring1,
+ cstring2,
+ cdouble,
+ cfloat,
+ ctinyint,
+ csmallint,
+ cint,
+ cbigint,
+ cboolean1,
+ cboolean2
+ FROM alltypesorc where ctimestamp1 IS NOT NULL;
+
+-- Time Series Query
+SELECT count(*) FROM druid_table_test_ts;
+
+SELECT floor_year(`__time`), SUM(cfloat), SUM(cdouble), SUM(ctinyint), SUM(csmallint),SUM(cint), SUM(cbigint)
+FROM druid_table_test_ts GROUP BY floor_year(`__time`);
+
+SELECT floor_year(`__time`), MIN(cfloat), MIN(cdouble), MIN(ctinyint), MIN(csmallint),MIN(cint), MIN(cbigint)
+FROM druid_table_test_ts GROUP BY floor_year(`__time`);
+
+SELECT floor_year(`__time`), MAX(cfloat), MAX(cdouble), MAX(ctinyint), MAX(csmallint),MAX(cint), MAX(cbigint)
+FROM druid_table_test_ts GROUP BY floor_year(`__time`);
+
+
+-- Group By
+
+SELECT cstring1, SUM(cdouble) as s FROM druid_table_test_ts GROUP BY cstring1 ORDER BY s ASC LIMIT 10;
+
+SELECT cstring2, MAX(cdouble) FROM druid_table_test_ts GROUP BY cstring2 ORDER BY cstring2 ASC LIMIT 10;
+
+
+-- TIME STUFF
+
+SELECT `__time`
+FROM druid_table_test_ts ORDER BY `__time` ASC LIMIT 10;
+
+SELECT `__time`
+FROM druid_table_test_ts
+WHERE `__time` < '1970-03-01 00:00:00' ORDER BY `__time` ASC LIMIT 10;
+
+SELECT `__time`
+FROM druid_table_test_ts
+WHERE `__time` >= '1968-01-01 00:00:00' AND `__time` <= '1970-03-01 00:00:00' ORDER BY `__time` ASC LIMIT 10;
+
+SELECT `__time`
+FROM druid_table_test_ts
+WHERE `__time` >= '1968-01-01 00:00:00' AND `__time` <= '1970-03-01 00:00:00'
+ AND `__time` < '2011-01-01 00:00:00' ORDER BY `__time` ASC LIMIT 10;
+
+SELECT `__time`
+FROM druid_table_test_ts
+WHERE `__time` BETWEEN '1968-01-01 00:00:00' AND '1970-01-01 00:00:00' ORDER BY `__time` ASC LIMIT 10;
+
+SELECT `__time`
+FROM druid_table_test_ts
+WHERE (`__time` BETWEEN '1968-01-01 00:00:00' AND '1970-01-01 00:00:00')
+ OR (`__time` BETWEEN '1968-02-01 00:00:00' AND '1970-04-01 00:00:00') ORDER BY `__time` ASC LIMIT 10;
http://git-wip-us.apache.org/repos/asf/hive/blob/8e73f4be/ql/src/test/results/clientpositive/druid/druid_timestamptz.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/druid/druid_timestamptz.q.out b/ql/src/test/results/clientpositive/druid/druid_timestamptz.q.out
index fa9583a..f37e80f 100644
--- a/ql/src/test/results/clientpositive/druid/druid_timestamptz.q.out
+++ b/ql/src/test/results/clientpositive/druid/druid_timestamptz.q.out
@@ -74,7 +74,7 @@ STAGE PLANS:
properties:
druid.fieldNames vc
druid.fieldTypes timestamp
- druid.query.json {"queryType":"scan","dataSource":"default.tstz1_n0","intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"],"virtualColumns":[{"type":"expression","name":"vc","expression":"\"__time\"","outputType":"LONG"}],"columns":["vc"],"resultFormat":"compactedList"}
+ druid.query.json {"queryType":"scan","dataSource":"default.tstz1_n0","intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"],"virtualColumns":[{"type":"expression","name":"vc","expression":"timestamp_parse(timestamp_format(\"__time\",'yyyy-MM-dd\\u0027T\\u0027HH:mm:ss.SSS\\u0027Z\\u0027','US/Pacific'),null,'UTC')","outputType":"LONG"}],"columns":["vc"],"resultFormat":"compactedList"}
druid.query.type scan
Select Operator
expressions: vc (type: timestamp)
@@ -107,7 +107,7 @@ STAGE PLANS:
properties:
druid.fieldNames vc
druid.fieldTypes timestamp
- druid.query.json {"queryType":"scan","dataSource":"default.tstz1_n0","intervals":["2016-01-03T20:26:34.000Z/3000-01-01T00:00:00.000Z"],"virtualColumns":[{"type":"expression","name":"vc","expression":"\"__time\"","outputType":"LONG"}],"columns":["vc"],"resultFormat":"compactedList"}
+ druid.query.json {"queryType":"scan","dataSource":"default.tstz1_n0","intervals":["2016-01-03T20:26:34.000Z/3000-01-01T00:00:00.000Z"],"virtualColumns":[{"type":"expression","name":"vc","expression":"timestamp_parse(timestamp_format(\"__time\",'yyyy-MM-dd\\u0027T\\u0027HH:mm:ss.SSS\\u0027Z\\u0027','US/Pacific'),null,'UTC')","outputType":"LONG"}],"columns":["vc"],"resultFormat":"compactedList"}
druid.query.type scan
Select Operator
expressions: vc (type: timestamp)
@@ -140,7 +140,7 @@ STAGE PLANS:
properties:
druid.fieldNames vc
druid.fieldTypes int
- druid.query.json {"queryType":"scan","dataSource":"default.tstz1_n0","intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"],"virtualColumns":[{"type":"expression","name":"vc","expression":"timestamp_extract(\"__time\",'HOUR','US/Pacific')","outputType":"LONG"}],"columns":["vc"],"resultFormat":"compactedList"}
+ druid.query.json {"queryType":"scan","dataSource":"default.tstz1_n0","intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"],"virtualColumns":[{"type":"expression","name":"vc","expression":"timestamp_extract(timestamp_parse(timestamp_format(\"__time\",'yyyy-MM-dd\\u0027T\\u0027HH:mm:ss.SSS\\u0027Z\\u0027','US/Pacific'),null,'UTC'),'HOUR','UTC')","outputType":"LONG"}],"columns":["vc"],"resultFormat":"compactedList"}
druid.query.type scan
Select Operator
expressions: vc (type: int)
@@ -173,7 +173,7 @@ STAGE PLANS:
properties:
druid.fieldNames vc
druid.fieldTypes timestamp
- druid.query.json {"queryType":"scan","dataSource":"default.tstz1_n0","intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"],"virtualColumns":[{"type":"expression","name":"vc","expression":"timestamp_floor(\"__time\",'PT1H','','US/Pacific')","outputType":"LONG"}],"columns":["vc"],"resultFormat":"compactedList"}
+ druid.query.json {"queryType":"scan","dataSource":"default.tstz1_n0","intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"],"virtualColumns":[{"type":"expression","name":"vc","expression":"timestamp_floor(timestamp_parse(timestamp_format(\"__time\",'yyyy-MM-dd\\u0027T\\u0027HH:mm:ss.SSS\\u0027Z\\u0027','US/Pacific'),null,'UTC'),'PT1H','','UTC')","outputType":"LONG"}],"columns":["vc"],"resultFormat":"compactedList"}
druid.query.type scan
Select Operator
expressions: vc (type: timestamp)
@@ -338,7 +338,7 @@ STAGE PLANS:
properties:
druid.fieldNames vc
druid.fieldTypes timestamp
- druid.query.json {"queryType":"scan","dataSource":"default.tstz1_n0","intervals":["2016-01-03T20:26:34.000Z/2016-01-03T20:26:34.001Z"],"virtualColumns":[{"type":"expression","name":"vc","expression":"\"__time\"","outputType":"LONG"}],"columns":["vc"],"resultFormat":"compactedList"}
+ druid.query.json {"queryType":"scan","dataSource":"default.tstz1_n0","intervals":["2016-01-03T20:26:34.000Z/2016-01-03T20:26:34.001Z"],"virtualColumns":[{"type":"expression","name":"vc","expression":"1451852794000","outputType":"LONG"}],"columns":["vc"],"resultFormat":"compactedList"}
druid.query.type scan
Select Operator
expressions: vc (type: timestamp)
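The timestamp_parse(timestamp_format("__time", <ISO pattern>, 'US/Pacific'), null, 'UTC') wrapper that now appears in these plans converts the stored instant into the value Hive's zone-less TIMESTAMP type expects: the instant is printed as its US/Pacific wall clock and that wall clock is re-encoded as if it were UTC, which is how a LONG-backed virtual column can carry a wall-clock timestamp. Subsequent timestamp_extract/timestamp_floor calls therefore run with 'UTC' rather than 'US/Pacific', since the zone shift has already been applied.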
http://git-wip-us.apache.org/repos/asf/hive/blob/8e73f4be/ql/src/test/results/clientpositive/druid/druidmini_expressions.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/druid/druidmini_expressions.q.out b/ql/src/test/results/clientpositive/druid/druidmini_expressions.q.out
index 0f83267..144032e 100644
--- a/ql/src/test/results/clientpositive/druid/druidmini_expressions.q.out
+++ b/ql/src/test/results/clientpositive/druid/druidmini_expressions.q.out
@@ -933,7 +933,7 @@ STAGE PLANS:
properties:
druid.fieldNames vc
druid.fieldTypes bigint
- druid.query.json {"queryType":"scan","dataSource":"default.druid_table_n0","intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"],"virtualColumns":[{"type":"expression","name":"vc","expression":"unix_timestamp(timestamp_format((1396681200 * '1000'),'yyyy-MM-dd HH:mm:ss','US/Pacific'),'yyyy-MM-dd HH:mm:ss')","outputType":"LONG"}],"columns":["vc"],"resultFormat":"compactedList","limit":1}
+ druid.query.json {"queryType":"scan","dataSource":"default.druid_table_n0","intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"],"virtualColumns":[{"type":"expression","name":"vc","expression":"unix_timestamp(timestamp_format((1396681200 * '1000'),'yyyy-MM-dd HH:mm:ss','UTC'),'yyyy-MM-dd HH:mm:ss')","outputType":"LONG"}],"columns":["vc"],"resultFormat":"compactedList","limit":1}
druid.query.type scan
Select Operator
expressions: vc (type: bigint)
@@ -948,7 +948,7 @@ POSTHOOK: query: select unix_timestamp(from_unixtime(1396681200)) from druid_tab
POSTHOOK: type: QUERY
POSTHOOK: Input: default@druid_table_n0
POSTHOOK: Output: hdfs://### HDFS PATH ###
-1396656000
+1396681200
PREHOOK: query: explain select unix_timestamp(`__time`) from druid_table_n0 limit 1
PREHOOK: type: QUERY
POSTHOOK: query: explain select unix_timestamp(`__time`) from druid_table_n0 limit 1
@@ -1003,7 +1003,7 @@ STAGE PLANS:
properties:
druid.fieldNames vc
druid.fieldTypes string
- druid.query.json {"queryType":"groupBy","dataSource":"default.druid_table_n0","granularity":"all","dimensions":[{"type":"default","dimension":"vc","outputName":"vc","outputType":"STRING"}],"virtualColumns":[{"type":"expression","name":"vc","expression":"timestamp_format((div(timestamp_parse(timestamp_format(\"__time\",'yyyy-MM-dd\\u0027T\\u0027HH:mm:ss.SSS\\u0027Z\\u0027','US/Pacific'),'','UTC'),1000) * '1000'),'yyyy-MM-dd HH:mm:ss','US/Pacific')","outputType":"STRING"}],"limitSpec":{"type":"default"},"aggregations":[],"intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"]}
+ druid.query.json {"queryType":"groupBy","dataSource":"default.druid_table_n0","granularity":"all","dimensions":[{"type":"default","dimension":"vc","outputName":"vc","outputType":"STRING"}],"virtualColumns":[{"type":"expression","name":"vc","expression":"timestamp_format((div(timestamp_parse(timestamp_format(\"__time\",'yyyy-MM-dd\\u0027T\\u0027HH:mm:ss.SSS\\u0027Z\\u0027','US/Pacific'),'','UTC'),1000) * '1000'),'yyyy-MM-dd HH:mm:ss','UTC')","outputType":"STRING"}],"limitSpec":{"type":"default"},"aggregations":[],"intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"]}
druid.query.type groupBy
Select Operator
expressions: vc (type: string)
@@ -1022,8 +1022,8 @@ GROUP BY FROM_UNIXTIME(UNIX_TIMESTAMP(CAST(`__time` as timestamp ),'yyyy-MM-dd H
POSTHOOK: type: QUERY
POSTHOOK: Input: default@druid_table_n0
POSTHOOK: Output: hdfs://### HDFS PATH ###
-1969-12-31 07:59:00
-1969-12-31 08:00:00
+1969-12-31 15:59:00
+1969-12-31 16:00:00
PREHOOK: query: explain select TRUNC(cast(`__time` as timestamp), 'YY') from druid_table_n0 GROUP BY TRUNC(cast(`__time` as timestamp), 'YY')
PREHOOK: type: QUERY
POSTHOOK: query: explain select TRUNC(cast(`__time` as timestamp), 'YY') from druid_table_n0 GROUP BY TRUNC(cast(`__time` as timestamp), 'YY')
@@ -1041,7 +1041,7 @@ STAGE PLANS:
properties:
druid.fieldNames vc
druid.fieldTypes string
- druid.query.json {"queryType":"groupBy","dataSource":"default.druid_table_n0","granularity":"all","dimensions":[{"type":"default","dimension":"vc","outputName":"vc","outputType":"STRING"}],"virtualColumns":[{"type":"expression","name":"vc","expression":"timestamp_format(timestamp_floor(timestamp_parse(timestamp_format(\"__time\",'yyyy-MM-dd\\u0027T\\u0027HH:mm:ss.SSS\\u0027Z\\u0027','US/Pacific'),'','UTC'),'P1Y','','US/Pacific'),'yyyy-MM-dd','US/Pacific')","outputType":"STRING"}],"limitSpec":{"type":"default"},"aggregations":[],"intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"]}
+ druid.query.json {"queryType":"groupBy","dataSource":"default.druid_table_n0","granularity":"all","dimensions":[{"type":"default","dimension":"vc","outputName":"vc","outputType":"STRING"}],"virtualColumns":[{"type":"expression","name":"vc","expression":"timestamp_format(timestamp_floor(timestamp_parse(timestamp_format(\"__time\",'yyyy-MM-dd\\u0027T\\u0027HH:mm:ss.SSS\\u0027Z\\u0027','US/Pacific'),'','UTC'),'P1Y','','UTC'),'yyyy-MM-dd','UTC')","outputType":"STRING"}],"limitSpec":{"type":"default"},"aggregations":[],"intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"]}
druid.query.type groupBy
Select Operator
expressions: vc (type: string)
@@ -1092,7 +1092,7 @@ STAGE PLANS:
properties:
druid.fieldNames vc
druid.fieldTypes string
- druid.query.json {"queryType":"groupBy","dataSource":"default.druid_table_n0","granularity":"all","dimensions":[{"type":"default","dimension":"vc","outputName":"vc","outputType":"STRING"}],"virtualColumns":[{"type":"expression","name":"vc","expression":"timestamp_format(timestamp_floor(timestamp_parse(timestamp_format(\"__time\",'yyyy-MM-dd\\u0027T\\u0027HH:mm:ss.SSS\\u0027Z\\u0027','US/Pacific'),'','UTC'),'P1M','','US/Pacific'),'yyyy-MM-dd','US/Pacific')","outputType":"STRING"}],"limitSpec":{"type":"default"},"aggregations":[],"intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"]}
+ druid.query.json {"queryType":"groupBy","dataSource":"default.druid_table_n0","granularity":"all","dimensions":[{"type":"default","dimension":"vc","outputName":"vc","outputType":"STRING"}],"virtualColumns":[{"type":"expression","name":"vc","expression":"timestamp_format(timestamp_floor(timestamp_parse(timestamp_format(\"__time\",'yyyy-MM-dd\\u0027T\\u0027HH:mm:ss.SSS\\u0027Z\\u0027','US/Pacific'),'','UTC'),'P1M','','UTC'),'yyyy-MM-dd','UTC')","outputType":"STRING"}],"limitSpec":{"type":"default"},"aggregations":[],"intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"]}
druid.query.type groupBy
Select Operator
expressions: vc (type: string)
@@ -1143,7 +1143,7 @@ STAGE PLANS:
properties:
druid.fieldNames vc
druid.fieldTypes string
- druid.query.json {"queryType":"groupBy","dataSource":"default.druid_table_n0","granularity":"all","dimensions":[{"type":"default","dimension":"vc","outputName":"vc","outputType":"STRING"}],"virtualColumns":[{"type":"expression","name":"vc","expression":"timestamp_format(timestamp_floor(timestamp_parse(timestamp_format(\"__time\",'yyyy-MM-dd\\u0027T\\u0027HH:mm:ss.SSS\\u0027Z\\u0027','US/Pacific'),'','UTC'),'P3M','','US/Pacific'),'yyyy-MM-dd','US/Pacific')","outputType":"STRING"}],"limitSpec":{"type":"default"},"aggregations":[],"intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"]}
+ druid.query.json {"queryType":"groupBy","dataSource":"default.druid_table_n0","granularity":"all","dimensions":[{"type":"default","dimension":"vc","outputName":"vc","outputType":"STRING"}],"virtualColumns":[{"type":"expression","name":"vc","expression":"timestamp_format(timestamp_floor(timestamp_parse(timestamp_format(\"__time\",'yyyy-MM-dd\\u0027T\\u0027HH:mm:ss.SSS\\u0027Z\\u0027','US/Pacific'),'','UTC'),'P3M','','UTC'),'yyyy-MM-dd','UTC')","outputType":"STRING"}],"limitSpec":{"type":"default"},"aggregations":[],"intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"]}
druid.query.type groupBy
Select Operator
expressions: vc (type: string)
@@ -1224,7 +1224,7 @@ STAGE PLANS:
properties:
druid.fieldNames vc,$f1
druid.fieldTypes date,double
- druid.query.json {"queryType":"groupBy","dataSource":"default.druid_table_n0","granularity":"all","dimensions":[{"type":"default","dimension":"vc","outputName":"vc","outputType":"LONG"}],"virtualColumns":[{"type":"expression","name":"vc","expression":"timestamp_floor(timestamp_parse(timestamp_format(timestamp_floor(timestamp_parse(timestamp_format(\"__time\",'yyyy-MM-dd\\u0027T\\u0027HH:mm:ss.SSS\\u0027Z\\u0027','US/Pacific'),'','UTC'),'P1M','','US/Pacific'),'yyyy-MM-dd','US/Pacific'),'','UTC'),'P1D','','UTC')","outputType":"LONG"}],"limitSpec":{"type":"default"},"aggregations":[{"type":"doubleSum","name":"$f1","expression":"(\"cdouble\" * \"cdouble\")"}],"intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"]}
+ druid.query.json {"queryType":"groupBy","dataSource":"default.druid_table_n0","granularity":"all","dimensions":[{"type":"default","dimension":"vc","outputName":"vc","outputType":"LONG"}],"virtualColumns":[{"type":"expression","name":"vc","expression":"timestamp_floor(timestamp_parse(timestamp_format(timestamp_floor(timestamp_parse(timestamp_format(\"__time\",'yyyy-MM-dd\\u0027T\\u0027HH:mm:ss.SSS\\u0027Z\\u0027','US/Pacific'),'','UTC'),'P1M','','UTC'),'yyyy-MM-dd','UTC'),'','UTC'),'P1D','','UTC')","outputType":"LONG"}],"limitSpec":{"type":"default"},"aggregations":[{"type":"doubleSum","name":"$f1","expression":"(\"cdouble\" * \"cdouble\")"}],"intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"]}
druid.query.type groupBy
Select Operator
expressions: $f1 (type: double), vc (type: date)
@@ -1269,7 +1269,7 @@ STAGE PLANS:
properties:
druid.fieldNames vc,vc0
druid.fieldTypes date,date
- druid.query.json {"queryType":"scan","dataSource":"default.druid_table_n0","intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"],"virtualColumns":[{"type":"expression","name":"vc","expression":"timestamp_shift(timestamp_floor(timestamp_parse(timestamp_format(\"__time\",'yyyy-MM-dd\\u0027T\\u0027HH:mm:ss.SSS\\u0027Z\\u0027','US/Pacific'),'','UTC'),'P1D','','UTC'),'P1D',CAST((\"cdouble\" / CAST(1000, 'DOUBLE')), 'LONG'),'US/Pacific')","outputType":"LONG"},{"type":"expression","name":"vc0","expression":"timestamp_shift(timestamp_floor(timestamp_parse(timestamp_format(\"__time\",'yyyy-MM-dd\\u0027T\\u0027HH:mm:ss.SSS\\u0027Z\\u0027','US/Pacific'),'','UTC'),'P1D','','UTC'),'P1D',-( CAST((\"cdouble\" / CAST(1000, 'DOUBLE')), 'LONG') ),'US/Pacific')","outputType":"LONG"}],"columns":["vc","vc0"],"resultFormat":"compactedList"}
+ druid.query.json {"queryType":"scan","dataSource":"default.druid_table_n0","intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"],"virtualColumns":[{"type":"expression","name":"vc","expression":"timestamp_shift(timestamp_floor(timestamp_parse(timestamp_format(\"__time\",'yyyy-MM-dd\\u0027T\\u0027HH:mm:ss.SSS\\u0027Z\\u0027','US/Pacific'),'','UTC'),'P1D','','UTC'),'P1D',CAST((\"cdouble\" / CAST(1000, 'DOUBLE')), 'LONG'),'UTC')","outputType":"LONG"},{"type":"expression","name":"vc0","expression":"timestamp_shift(timestamp_floor(timestamp_parse(timestamp_format(\"__time\",'yyyy-MM-dd\\u0027T\\u0027HH:mm:ss.SSS\\u0027Z\\u0027','US/Pacific'),'','UTC'),'P1D','','UTC'),'P1D',-( CAST((\"cdouble\" / CAST(1000, 'DOUBLE')), 'LONG') ),'UTC')","outputType":"LONG"}],"columns":["vc","vc0"],"resultFormat":"compactedList"}
druid.query.type scan
Statistics: Num rows: 9173 Data size: 976192 Basic stats: COMPLETE Column stats: NONE
Select Operator
@@ -1316,7 +1316,7 @@ POSTHOOK: type: QUERY
POSTHOOK: Input: default@druid_table_n0
POSTHOOK: Output: hdfs://### HDFS PATH ###
1969-02-26 1970-11-04
-1969-03-19 1970-10-13
+1969-03-19 1970-10-14
1969-11-13 1970-02-17
PREHOOK: query: -- Boolean Values
SELECT cboolean2, count(*) from druid_table_n0 GROUP BY cboolean2
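The golden-value shifts in this file are the US/Pacific offset made visible: 1396681200 - 1396656000 = 25200 seconds = 7 hours (PDT), and the grouped times move from 07:59/08:00 to 15:59/16:00, 8 hours (PST), because unix_timestamp/from_unixtime now run in UTC while the printed timestamps are the US/Pacific wall clock of the same instants; one timestamp_shift result likewise lands a day later (1970-10-13 to 1970-10-14) once the shift is computed in UTC.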
http://git-wip-us.apache.org/repos/asf/hive/blob/8e73f4be/ql/src/test/results/clientpositive/druid/druidmini_test_ts.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/druid/druidmini_test_ts.q.out b/ql/src/test/results/clientpositive/druid/druidmini_test_ts.q.out
new file mode 100644
index 0000000..879e2a7
--- /dev/null
+++ b/ql/src/test/results/clientpositive/druid/druidmini_test_ts.q.out
@@ -0,0 +1,263 @@
+PREHOOK: query: CREATE TABLE druid_table_test_ts
+STORED BY 'org.apache.hadoop.hive.druid.DruidStorageHandler'
+TBLPROPERTIES ("druid.segment.granularity" = "HOUR", "druid.query.granularity" = "MINUTE")
+AS
+SELECT `ctimestamp1` as `__time`,
+ cstring1,
+ cstring2,
+ cdouble,
+ cfloat,
+ ctinyint,
+ csmallint,
+ cint,
+ cbigint,
+ cboolean1,
+ cboolean2
+ FROM alltypesorc where ctimestamp1 IS NOT NULL
+PREHOOK: type: CREATETABLE_AS_SELECT
+PREHOOK: Input: default@alltypesorc
+PREHOOK: Output: database:default
+PREHOOK: Output: default@druid_table_test_ts
+POSTHOOK: query: CREATE TABLE druid_table_test_ts
+STORED BY 'org.apache.hadoop.hive.druid.DruidStorageHandler'
+TBLPROPERTIES ("druid.segment.granularity" = "HOUR", "druid.query.granularity" = "MINUTE")
+AS
+SELECT `ctimestamp1` as `__time`,
+ cstring1,
+ cstring2,
+ cdouble,
+ cfloat,
+ ctinyint,
+ csmallint,
+ cint,
+ cbigint,
+ cboolean1,
+ cboolean2
+ FROM alltypesorc where ctimestamp1 IS NOT NULL
+POSTHOOK: type: CREATETABLE_AS_SELECT
+POSTHOOK: Input: default@alltypesorc
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@druid_table_test_ts
+POSTHOOK: Lineage: druid_table_test_ts.__time SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: druid_table_test_ts.cbigint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cbigint, type:bigint, comment:null), ]
+POSTHOOK: Lineage: druid_table_test_ts.cboolean1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean1, type:boolean, comment:null), ]
+POSTHOOK: Lineage: druid_table_test_ts.cboolean2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean2, type:boolean, comment:null), ]
+POSTHOOK: Lineage: druid_table_test_ts.cdouble SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ]
+POSTHOOK: Lineage: druid_table_test_ts.cfloat SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cfloat, type:float, comment:null), ]
+POSTHOOK: Lineage: druid_table_test_ts.cint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ]
+POSTHOOK: Lineage: druid_table_test_ts.csmallint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:csmallint, type:smallint, comment:null), ]
+POSTHOOK: Lineage: druid_table_test_ts.cstring1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring1, type:string, comment:null), ]
+POSTHOOK: Lineage: druid_table_test_ts.cstring2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring2, type:string, comment:null), ]
+POSTHOOK: Lineage: druid_table_test_ts.ctinyint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctinyint, type:tinyint, comment:null), ]
+PREHOOK: query: SELECT count(*) FROM druid_table_test_ts
+PREHOOK: type: QUERY
+PREHOOK: Input: default@druid_table_test_ts
+PREHOOK: Output: hdfs://### HDFS PATH ###
+POSTHOOK: query: SELECT count(*) FROM druid_table_test_ts
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@druid_table_test_ts
+POSTHOOK: Output: hdfs://### HDFS PATH ###
+6105
+PREHOOK: query: SELECT floor_year(`__time`), SUM(cfloat), SUM(cdouble), SUM(ctinyint), SUM(csmallint),SUM(cint), SUM(cbigint)
+FROM druid_table_test_ts GROUP BY floor_year(`__time`)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@druid_table_test_ts
+PREHOOK: Output: hdfs://### HDFS PATH ###
+POSTHOOK: query: SELECT floor_year(`__time`), SUM(cfloat), SUM(cdouble), SUM(ctinyint), SUM(csmallint),SUM(cint), SUM(cbigint)
+FROM druid_table_test_ts GROUP BY floor_year(`__time`)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@druid_table_test_ts
+POSTHOOK: Output: hdfs://### HDFS PATH ###
+1969-01-01 00:00:00 -39590.24724686146 2.7308662809692383E7 -39967 7781089 1408069801800 10992545287
+PREHOOK: query: SELECT floor_year(`__time`), MIN(cfloat), MIN(cdouble), MIN(ctinyint), MIN(csmallint),MIN(cint), MIN(cbigint)
+FROM druid_table_test_ts GROUP BY floor_year(`__time`)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@druid_table_test_ts
+PREHOOK: Output: hdfs://### HDFS PATH ###
+POSTHOOK: query: SELECT floor_year(`__time`), MIN(cfloat), MIN(cdouble), MIN(ctinyint), MIN(csmallint),MIN(cint), MIN(cbigint)
+FROM druid_table_test_ts GROUP BY floor_year(`__time`)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@druid_table_test_ts
+POSTHOOK: Output: hdfs://### HDFS PATH ###
+1969-01-01 00:00:00 -1790.7781 -308691.84375 2 14255 -1073279343 -8577981133
+PREHOOK: query: SELECT floor_year(`__time`), MAX(cfloat), MAX(cdouble), MAX(ctinyint), MAX(csmallint),MAX(cint), MAX(cbigint)
+FROM druid_table_test_ts GROUP BY floor_year(`__time`)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@druid_table_test_ts
+PREHOOK: Output: hdfs://### HDFS PATH ###
+POSTHOOK: query: SELECT floor_year(`__time`), MAX(cfloat), MAX(cdouble), MAX(ctinyint), MAX(csmallint),MAX(cint), MAX(cbigint)
+FROM druid_table_test_ts GROUP BY floor_year(`__time`)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@druid_table_test_ts
+POSTHOOK: Output: hdfs://### HDFS PATH ###
+1969-01-01 00:00:00 769.16394 1.9565518E7 -45 -8101 1276572707 4923772860
+PREHOOK: query: SELECT cstring1, SUM(cdouble) as s FROM druid_table_test_ts GROUP BY cstring1 ORDER BY s ASC LIMIT 10
+PREHOOK: type: QUERY
+PREHOOK: Input: default@druid_table_test_ts
+PREHOOK: Output: hdfs://### HDFS PATH ###
+POSTHOOK: query: SELECT cstring1, SUM(cdouble) as s FROM druid_table_test_ts GROUP BY cstring1 ORDER BY s ASC LIMIT 10
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@druid_table_test_ts
+POSTHOOK: Output: hdfs://### HDFS PATH ###
+1cGVWH7n1QU -596096.6875
+821UdmGbkEf4j -14161.827026367188
+00iT08 0.0
+02v8WnLuYDos3Cq 0.0
+yv1js 0.0
+02VRbSC5I 0.0
+014ILGhXxNY7g02hl0Xw 0.0
+02vDyIVT752 0.0
+00PafC7v 0.0
+ytpx1RL8F2I 0.0
+PREHOOK: query: SELECT cstring2, MAX(cdouble) FROM druid_table_test_ts GROUP BY cstring2 ORDER BY cstring2 ASC LIMIT 10
+PREHOOK: type: QUERY
+PREHOOK: Input: default@druid_table_test_ts
+PREHOOK: Output: hdfs://### HDFS PATH ###
+POSTHOOK: query: SELECT cstring2, MAX(cdouble) FROM druid_table_test_ts GROUP BY cstring2 ORDER BY cstring2 ASC LIMIT 10
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@druid_table_test_ts
+POSTHOOK: Output: hdfs://### HDFS PATH ###
+NULL 1.9565518E7
+0034fkcXMQI3 15601.0
+004J8y 0.0
+00GNm -200.0
+00GW4dnb6Wgj52 -200.0
+00PBhB1Iefgk 0.0
+00d5kr1wEB7evExG 15601.0
+00qccwt8n 0.0
+017fFeQ3Gcsa83Xj2Vo0 0.0
+01EfkvNk6mjG44uxs 0.0
+PREHOOK: query: SELECT `__time`
+FROM druid_table_test_ts ORDER BY `__time` ASC LIMIT 10
+PREHOOK: type: QUERY
+PREHOOK: Input: default@druid_table_test_ts
+PREHOOK: Output: hdfs://### HDFS PATH ###
+POSTHOOK: query: SELECT `__time`
+FROM druid_table_test_ts ORDER BY `__time` ASC LIMIT 10
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@druid_table_test_ts
+POSTHOOK: Output: hdfs://### HDFS PATH ###
+1969-12-31 15:59:00
+1969-12-31 15:59:00
+1969-12-31 15:59:00
+1969-12-31 15:59:00
+1969-12-31 15:59:00
+1969-12-31 15:59:00
+1969-12-31 15:59:00
+1969-12-31 15:59:00
+1969-12-31 15:59:00
+1969-12-31 15:59:00
+PREHOOK: query: SELECT `__time`
+FROM druid_table_test_ts
+WHERE `__time` < '1970-03-01 00:00:00' ORDER BY `__time` ASC LIMIT 10
+PREHOOK: type: QUERY
+PREHOOK: Input: default@druid_table_test_ts
+PREHOOK: Output: hdfs://### HDFS PATH ###
+POSTHOOK: query: SELECT `__time`
+FROM druid_table_test_ts
+WHERE `__time` < '1970-03-01 00:00:00' ORDER BY `__time` ASC LIMIT 10
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@druid_table_test_ts
+POSTHOOK: Output: hdfs://### HDFS PATH ###
+1969-12-31 15:59:00
+1969-12-31 15:59:00
+1969-12-31 15:59:00
+1969-12-31 15:59:00
+1969-12-31 15:59:00
+1969-12-31 15:59:00
+1969-12-31 15:59:00
+1969-12-31 15:59:00
+1969-12-31 15:59:00
+1969-12-31 15:59:00
+PREHOOK: query: SELECT `__time`
+FROM druid_table_test_ts
+WHERE `__time` >= '1968-01-01 00:00:00' AND `__time` <= '1970-03-01 00:00:00' ORDER BY `__time` ASC LIMIT 10
+PREHOOK: type: QUERY
+PREHOOK: Input: default@druid_table_test_ts
+PREHOOK: Output: hdfs://### HDFS PATH ###
+POSTHOOK: query: SELECT `__time`
+FROM druid_table_test_ts
+WHERE `__time` >= '1968-01-01 00:00:00' AND `__time` <= '1970-03-01 00:00:00' ORDER BY `__time` ASC LIMIT 10
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@druid_table_test_ts
+POSTHOOK: Output: hdfs://### HDFS PATH ###
+1969-12-31 15:59:00
+1969-12-31 15:59:00
+1969-12-31 15:59:00
+1969-12-31 15:59:00
+1969-12-31 15:59:00
+1969-12-31 15:59:00
+1969-12-31 15:59:00
+1969-12-31 15:59:00
+1969-12-31 15:59:00
+1969-12-31 15:59:00
+PREHOOK: query: SELECT `__time`
+FROM druid_table_test_ts
+WHERE `__time` >= '1968-01-01 00:00:00' AND `__time` <= '1970-03-01 00:00:00'
+ AND `__time` < '2011-01-01 00:00:00' ORDER BY `__time` ASC LIMIT 10
+PREHOOK: type: QUERY
+PREHOOK: Input: default@druid_table_test_ts
+PREHOOK: Output: hdfs://### HDFS PATH ###
+POSTHOOK: query: SELECT `__time`
+FROM druid_table_test_ts
+WHERE `__time` >= '1968-01-01 00:00:00' AND `__time` <= '1970-03-01 00:00:00'
+ AND `__time` < '2011-01-01 00:00:00' ORDER BY `__time` ASC LIMIT 10
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@druid_table_test_ts
+POSTHOOK: Output: hdfs://### HDFS PATH ###
+1969-12-31 15:59:00
+1969-12-31 15:59:00
+1969-12-31 15:59:00
+1969-12-31 15:59:00
+1969-12-31 15:59:00
+1969-12-31 15:59:00
+1969-12-31 15:59:00
+1969-12-31 15:59:00
+1969-12-31 15:59:00
+1969-12-31 15:59:00
+PREHOOK: query: SELECT `__time`
+FROM druid_table_test_ts
+WHERE `__time` BETWEEN '1968-01-01 00:00:00' AND '1970-01-01 00:00:00' ORDER BY `__time` ASC LIMIT 10
+PREHOOK: type: QUERY
+PREHOOK: Input: default@druid_table_test_ts
+PREHOOK: Output: hdfs://### HDFS PATH ###
+POSTHOOK: query: SELECT `__time`
+FROM druid_table_test_ts
+WHERE `__time` BETWEEN '1968-01-01 00:00:00' AND '1970-01-01 00:00:00' ORDER BY `__time` ASC LIMIT 10
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@druid_table_test_ts
+POSTHOOK: Output: hdfs://### HDFS PATH ###
+1969-12-31 15:59:00
+1969-12-31 15:59:00
+1969-12-31 15:59:00
+1969-12-31 15:59:00
+1969-12-31 15:59:00
+1969-12-31 15:59:00
+1969-12-31 15:59:00
+1969-12-31 15:59:00
+1969-12-31 15:59:00
+1969-12-31 15:59:00
+PREHOOK: query: SELECT `__time`
+FROM druid_table_test_ts
+WHERE (`__time` BETWEEN '1968-01-01 00:00:00' AND '1970-01-01 00:00:00')
+ OR (`__time` BETWEEN '1968-02-01 00:00:00' AND '1970-04-01 00:00:00') ORDER BY `__time` ASC LIMIT 10
+PREHOOK: type: QUERY
+PREHOOK: Input: default@druid_table_test_ts
+PREHOOK: Output: hdfs://### HDFS PATH ###
+POSTHOOK: query: SELECT `__time`
+FROM druid_table_test_ts
+WHERE (`__time` BETWEEN '1968-01-01 00:00:00' AND '1970-01-01 00:00:00')
+ OR (`__time` BETWEEN '1968-02-01 00:00:00' AND '1970-04-01 00:00:00') ORDER BY `__time` ASC LIMIT 10
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@druid_table_test_ts
+POSTHOOK: Output: hdfs://### HDFS PATH ###
+1969-12-31 15:59:00
+1969-12-31 15:59:00
+1969-12-31 15:59:00
+1969-12-31 15:59:00
+1969-12-31 15:59:00
+1969-12-31 15:59:00
+1969-12-31 15:59:00
+1969-12-31 15:59:00
+1969-12-31 15:59:00
+1969-12-31 15:59:00
http://git-wip-us.apache.org/repos/asf/hive/blob/8e73f4be/storage-api/src/java/org/apache/hadoop/hive/serde2/io/DateWritable.java
----------------------------------------------------------------------
diff --git a/storage-api/src/java/org/apache/hadoop/hive/serde2/io/DateWritable.java b/storage-api/src/java/org/apache/hadoop/hive/serde2/io/DateWritable.java
index 3894e09..6325d5d 100644
--- a/storage-api/src/java/org/apache/hadoop/hive/serde2/io/DateWritable.java
+++ b/storage-api/src/java/org/apache/hadoop/hive/serde2/io/DateWritable.java
@@ -38,7 +38,6 @@ import org.apache.hadoop.io.WritableUtils;
* YYYY-MM-DD
*
*/
-@Deprecated
public class DateWritable implements WritableComparable<DateWritable> {
private static final long MILLIS_PER_DAY = TimeUnit.DAYS.toMillis(1);