Posted to commits@hive.apache.org by jc...@apache.org on 2017/10/16 23:48:46 UTC
[2/4] hive git commit: HIVE-17672: Upgrade Calcite version to 1.14 (Jesus Camacho Rodriguez, reviewed by Ashutosh Chauhan)
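The golden-file diffs below show two visible effects of the Calcite 1.14 upgrade on the Druid integration: the Druid __time column is now typed as timestamp with local time zone rather than timestamp, and the generated Druid queries (druid.query.json) carry the session time zone, US/Pacific in these qtests, instead of UTC. As a minimal sketch of the new type in HiveQL, mirroring the query exercised in druid_timeseries.q further down:

    EXPLAIN
    SELECT floor_hour(`__time`), max(added), sum(variation)
    FROM druid_table_1
    WHERE floor_hour(`__time`)
        BETWEEN CAST('2010-01-01 00:00:00' AS TIMESTAMP WITH LOCAL TIME ZONE)
            AND CAST('2014-01-01 00:00:00' AS TIMESTAMP WITH LOCAL TIME ZONE)
    GROUP BY floor_hour(`__time`)

Both BETWEEN endpoints are interpreted in the session time zone, which is why the updated plans print them as 2010-01-01 00:00:00.0 US/Pacific and 2014-01-01 00:00:00.0 US/Pacific.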
http://git-wip-us.apache.org/repos/asf/hive/blob/9975131c/ql/src/test/results/clientpositive/druid_basic2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/druid_basic2.q.out b/ql/src/test/results/clientpositive/druid_basic2.q.out
index 5d6862c..753ccb4 100644
--- a/ql/src/test/results/clientpositive/druid_basic2.q.out
+++ b/ql/src/test/results/clientpositive/druid_basic2.q.out
@@ -17,7 +17,7 @@ POSTHOOK: query: DESCRIBE FORMATTED druid_table_1
POSTHOOK: type: DESCTABLE
POSTHOOK: Input: default@druid_table_1
# col_name data_type comment
-__time timestamp from deserializer
+__time timestamp with local time zone from deserializer
robot string from deserializer
namespace string from deserializer
anonymous string from deserializer
@@ -271,7 +271,7 @@ STAGE PLANS:
column.name.delimiter ,
columns __time,robot,namespace,anonymous,unpatrolled,page,language,newpage,user,count,added,delta,variation,deleted
columns.comments 'from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer'
- columns.types timestamp:string:string:string:string:string:string:string:string:float:float:float:float:float
+ columns.types timestamp with local time zone:string:string:string:string:string:string:string:string:float:float:float:float:float
druid.datasource wikipedia
druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":false,"intervals":["1900-01-01T00:00:00.000/3000-01-01T00:00:00.000"],"dimensions":["robot","language"],"metrics":[],"granularity":"all","pagingSpec":{"threshold":16384,"fromNext":true},"context":{"druid.query.fetch":false}}
druid.query.type select
@@ -280,7 +280,7 @@ STAGE PLANS:
numFiles 0
numRows 0
rawDataSize 0
- serialization.ddl struct druid_table_1 { timestamp __time, string robot, string namespace, string anonymous, string unpatrolled, string page, string language, string newpage, string user, float count, float added, float delta, float variation, float deleted}
+ serialization.ddl struct druid_table_1 { timestamp with local time zone __time, string robot, string namespace, string anonymous, string unpatrolled, string page, string language, string newpage, string user, float count, float added, float delta, float variation, float deleted}
serialization.format 1
serialization.lib org.apache.hadoop.hive.druid.QTestDruidSerDe
storage_handler org.apache.hadoop.hive.druid.QTestDruidStorageHandler
@@ -297,7 +297,7 @@ STAGE PLANS:
column.name.delimiter ,
columns __time,robot,namespace,anonymous,unpatrolled,page,language,newpage,user,count,added,delta,variation,deleted
columns.comments 'from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer'
- columns.types timestamp:string:string:string:string:string:string:string:string:float:float:float:float:float
+ columns.types timestamp with local time zone:string:string:string:string:string:string:string:string:float:float:float:float:float
druid.datasource wikipedia
druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":false,"intervals":["1900-01-01T00:00:00.000/3000-01-01T00:00:00.000"],"dimensions":["language"],"metrics":[],"granularity":"all","pagingSpec":{"threshold":16384,"fromNext":true},"context":{"druid.query.fetch":false}}
druid.query.type select
@@ -306,7 +306,7 @@ STAGE PLANS:
numFiles 0
numRows 0
rawDataSize 0
- serialization.ddl struct druid_table_1 { timestamp __time, string robot, string namespace, string anonymous, string unpatrolled, string page, string language, string newpage, string user, float count, float added, float delta, float variation, float deleted}
+ serialization.ddl struct druid_table_1 { timestamp with local time zone __time, string robot, string namespace, string anonymous, string unpatrolled, string page, string language, string newpage, string user, float count, float added, float delta, float variation, float deleted}
serialization.format 1
serialization.lib org.apache.hadoop.hive.druid.QTestDruidSerDe
storage_handler org.apache.hadoop.hive.druid.QTestDruidStorageHandler
@@ -439,7 +439,7 @@ STAGE PLANS:
column.name.delimiter ,
columns __time,robot,namespace,anonymous,unpatrolled,page,language,newpage,user,count,added,delta,variation,deleted
columns.comments 'from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer'
- columns.types timestamp:string:string:string:string:string:string:string:string:float:float:float:float:float
+ columns.types timestamp with local time zone:string:string:string:string:string:string:string:string:float:float:float:float:float
druid.datasource wikipedia
druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":false,"intervals":["1900-01-01T00:00:00.000/3000-01-01T00:00:00.000"],"filter":{"type":"selector","dimension":"language","value":"en"},"dimensions":[],"metrics":[],"granularity":"all","pagingSpec":{"threshold":16384,"fromNext":true},"context":{"druid.query.fetch":false}}
druid.query.type select
@@ -448,7 +448,7 @@ STAGE PLANS:
numFiles 0
numRows 0
rawDataSize 0
- serialization.ddl struct druid_table_1 { timestamp __time, string robot, string namespace, string anonymous, string unpatrolled, string page, string language, string newpage, string user, float count, float added, float delta, float variation, float deleted}
+ serialization.ddl struct druid_table_1 { timestamp with local time zone __time, string robot, string namespace, string anonymous, string unpatrolled, string page, string language, string newpage, string user, float count, float added, float delta, float variation, float deleted}
serialization.format 1
serialization.lib org.apache.hadoop.hive.druid.QTestDruidSerDe
storage_handler org.apache.hadoop.hive.druid.QTestDruidStorageHandler
@@ -465,7 +465,7 @@ STAGE PLANS:
column.name.delimiter ,
columns __time,robot,namespace,anonymous,unpatrolled,page,language,newpage,user,count,added,delta,variation,deleted
columns.comments 'from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer'
- columns.types timestamp:string:string:string:string:string:string:string:string:float:float:float:float:float
+ columns.types timestamp with local time zone:string:string:string:string:string:string:string:string:float:float:float:float:float
druid.datasource wikipedia
druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":false,"intervals":["1900-01-01T00:00:00.000/3000-01-01T00:00:00.000"],"filter":{"type":"selector","dimension":"language","value":"en"},"dimensions":["robot"],"metrics":[],"granularity":"all","pagingSpec":{"threshold":16384,"fromNext":true},"context":{"druid.query.fetch":false}}
druid.query.type select
@@ -474,7 +474,7 @@ STAGE PLANS:
numFiles 0
numRows 0
rawDataSize 0
- serialization.ddl struct druid_table_1 { timestamp __time, string robot, string namespace, string anonymous, string unpatrolled, string page, string language, string newpage, string user, float count, float added, float delta, float variation, float deleted}
+ serialization.ddl struct druid_table_1 { timestamp with local time zone __time, string robot, string namespace, string anonymous, string unpatrolled, string page, string language, string newpage, string user, float count, float added, float delta, float variation, float deleted}
serialization.format 1
serialization.lib org.apache.hadoop.hive.druid.QTestDruidSerDe
storage_handler org.apache.hadoop.hive.druid.QTestDruidStorageHandler
@@ -553,12 +553,12 @@ STAGE PLANS:
TableScan
alias: druid_table_1
properties:
- druid.query.json {"queryType":"groupBy","dataSource":"wikipedia","granularity":"all","dimensions":[{"type":"default","dimension":"robot"},{"type":"default","dimension":"language"},{"type":"extraction","dimension":"__time","outputName":"floor_day","extractionFn":{"type":"timeFormat","format":"yyyy-MM-dd'T'HH:mm:ss.SSS'Z'","granularity":"day","timeZone":"UTC","locale":"en-US"}}],"limitSpec":{"type":"default"},"aggregations":[{"type":"doubleMax","name":"$f3","fieldName":"added"},{"type":"doubleSum","name":"$f4","fieldName":"delta"}],"intervals":["1900-01-01T00:00:00.000/3000-01-01T00:00:00.000"]}
+ druid.query.json {"queryType":"groupBy","dataSource":"wikipedia","granularity":"all","dimensions":[{"type":"default","dimension":"robot"},{"type":"default","dimension":"language"},{"type":"extraction","dimension":"__time","outputName":"floor_day","extractionFn":{"type":"timeFormat","format":"yyyy-MM-dd'T'HH:mm:ss.SSS'Z'","granularity":"day","timeZone":"US/Pacific","locale":"en-US"}}],"limitSpec":{"type":"default"},"aggregations":[{"type":"doubleMax","name":"$f3","fieldName":"added"},{"type":"doubleSum","name":"$f4","fieldName":"delta"}],"intervals":["1900-01-01T00:00:00.000/3000-01-01T00:00:00.000"]}
druid.query.type groupBy
Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
GatherStats: false
Select Operator
- expressions: robot (type: string), floor_day (type: timestamp), $f3 (type: float), $f4 (type: float), UDFToInteger(robot) (type: int)
+ expressions: robot (type: string), floor_day (type: timestamp with local time zone), $f3 (type: float), $f4 (type: float), UDFToInteger(robot) (type: int)
outputColumnNames: _col0, _col1, _col2, _col3, _col5
Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
Reduce Output Operator
@@ -569,7 +569,7 @@ STAGE PLANS:
tag: -1
TopN: 10
TopN Hash Memory Usage: 0.1
- value expressions: _col0 (type: string), _col1 (type: timestamp), _col3 (type: float)
+ value expressions: _col0 (type: string), _col1 (type: timestamp with local time zone), _col3 (type: float)
auto parallelism: false
Path -> Alias:
#### A masked pattern was here ####
@@ -586,16 +586,16 @@ STAGE PLANS:
column.name.delimiter ,
columns __time,robot,namespace,anonymous,unpatrolled,page,language,newpage,user,count,added,delta,variation,deleted
columns.comments 'from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer'
- columns.types timestamp:string:string:string:string:string:string:string:string:float:float:float:float:float
+ columns.types timestamp with local time zone:string:string:string:string:string:string:string:string:float:float:float:float:float
druid.datasource wikipedia
- druid.query.json {"queryType":"groupBy","dataSource":"wikipedia","granularity":"all","dimensions":[{"type":"default","dimension":"robot"},{"type":"default","dimension":"language"},{"type":"extraction","dimension":"__time","outputName":"floor_day","extractionFn":{"type":"timeFormat","format":"yyyy-MM-dd'T'HH:mm:ss.SSS'Z'","granularity":"day","timeZone":"UTC","locale":"en-US"}}],"limitSpec":{"type":"default"},"aggregations":[{"type":"doubleMax","name":"$f3","fieldName":"added"},{"type":"doubleSum","name":"$f4","fieldName":"delta"}],"intervals":["1900-01-01T00:00:00.000/3000-01-01T00:00:00.000"]}
+ druid.query.json {"queryType":"groupBy","dataSource":"wikipedia","granularity":"all","dimensions":[{"type":"default","dimension":"robot"},{"type":"default","dimension":"language"},{"type":"extraction","dimension":"__time","outputName":"floor_day","extractionFn":{"type":"timeFormat","format":"yyyy-MM-dd'T'HH:mm:ss.SSS'Z'","granularity":"day","timeZone":"US/Pacific","locale":"en-US"}}],"limitSpec":{"type":"default"},"aggregations":[{"type":"doubleMax","name":"$f3","fieldName":"added"},{"type":"doubleSum","name":"$f4","fieldName":"delta"}],"intervals":["1900-01-01T00:00:00.000/3000-01-01T00:00:00.000"]}
druid.query.type groupBy
#### A masked pattern was here ####
name default.druid_table_1
numFiles 0
numRows 0
rawDataSize 0
- serialization.ddl struct druid_table_1 { timestamp __time, string robot, string namespace, string anonymous, string unpatrolled, string page, string language, string newpage, string user, float count, float added, float delta, float variation, float deleted}
+ serialization.ddl struct druid_table_1 { timestamp with local time zone __time, string robot, string namespace, string anonymous, string unpatrolled, string page, string language, string newpage, string user, float count, float added, float delta, float variation, float deleted}
serialization.format 1
serialization.lib org.apache.hadoop.hive.druid.QTestDruidSerDe
storage_handler org.apache.hadoop.hive.druid.QTestDruidStorageHandler
@@ -612,16 +612,16 @@ STAGE PLANS:
column.name.delimiter ,
columns __time,robot,namespace,anonymous,unpatrolled,page,language,newpage,user,count,added,delta,variation,deleted
columns.comments 'from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer'
- columns.types timestamp:string:string:string:string:string:string:string:string:float:float:float:float:float
+ columns.types timestamp with local time zone:string:string:string:string:string:string:string:string:float:float:float:float:float
druid.datasource wikipedia
- druid.query.json {"queryType":"groupBy","dataSource":"wikipedia","granularity":"all","dimensions":[{"type":"default","dimension":"robot"},{"type":"default","dimension":"language"},{"type":"extraction","dimension":"__time","outputName":"floor_day","extractionFn":{"type":"timeFormat","format":"yyyy-MM-dd'T'HH:mm:ss.SSS'Z'","granularity":"day","timeZone":"UTC","locale":"en-US"}}],"limitSpec":{"type":"default"},"aggregations":[{"type":"doubleMax","name":"$f3","fieldName":"added"},{"type":"doubleSum","name":"$f4","fieldName":"delta"}],"intervals":["1900-01-01T00:00:00.000/3000-01-01T00:00:00.000"]}
+ druid.query.json {"queryType":"groupBy","dataSource":"wikipedia","granularity":"all","dimensions":[{"type":"default","dimension":"robot"},{"type":"default","dimension":"language"},{"type":"extraction","dimension":"__time","outputName":"floor_day","extractionFn":{"type":"timeFormat","format":"yyyy-MM-dd'T'HH:mm:ss.SSS'Z'","granularity":"day","timeZone":"US/Pacific","locale":"en-US"}}],"limitSpec":{"type":"default"},"aggregations":[{"type":"doubleMax","name":"$f3","fieldName":"added"},{"type":"doubleSum","name":"$f4","fieldName":"delta"}],"intervals":["1900-01-01T00:00:00.000/3000-01-01T00:00:00.000"]}
druid.query.type groupBy
#### A masked pattern was here ####
name default.druid_table_1
numFiles 0
numRows 0
rawDataSize 0
- serialization.ddl struct druid_table_1 { timestamp __time, string robot, string namespace, string anonymous, string unpatrolled, string page, string language, string newpage, string user, float count, float added, float delta, float variation, float deleted}
+ serialization.ddl struct druid_table_1 { timestamp with local time zone __time, string robot, string namespace, string anonymous, string unpatrolled, string page, string language, string newpage, string user, float count, float added, float delta, float variation, float deleted}
serialization.format 1
serialization.lib org.apache.hadoop.hive.druid.QTestDruidSerDe
storage_handler org.apache.hadoop.hive.druid.QTestDruidStorageHandler
@@ -635,7 +635,7 @@ STAGE PLANS:
Needs Tagging: false
Reduce Operator Tree:
Select Operator
- expressions: VALUE._col0 (type: string), VALUE._col1 (type: timestamp), KEY.reducesinkkey1 (type: float), VALUE._col2 (type: float)
+ expressions: VALUE._col0 (type: string), VALUE._col1 (type: timestamp with local time zone), KEY.reducesinkkey1 (type: float), VALUE._col2 (type: float)
outputColumnNames: _col0, _col1, _col2, _col3
Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
Limit
@@ -653,7 +653,7 @@ STAGE PLANS:
output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
properties:
columns _col0,_col1,_col2,_col3
- columns.types string:timestamp:float:float
+ columns.types string:timestamp with local time zone:float:float
escape.delim \
hive.serialization.extend.additional.nesting.levels true
serialization.escape.crlf true
@@ -737,32 +737,32 @@ STAGE PLANS:
Map Operator Tree:
TableScan
alias: druid_table_1
- filterExpr: floor_day(__time) BETWEEN 1999-11-01 00:00:00.0 AND 1999-11-10 00:00:00.0 (type: boolean)
+ filterExpr: floor_day(__time) BETWEEN 1999-11-01 00:00:00.0 US/Pacific AND 1999-11-10 00:00:00.0 US/Pacific (type: boolean)
properties:
druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":false,"intervals":["1900-01-01T00:00:00.000/3000-01-01T00:00:00.000"],"dimensions":["robot"],"metrics":[],"granularity":"all","pagingSpec":{"threshold":16384,"fromNext":true},"context":{"druid.query.fetch":false}}
druid.query.type select
Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
Filter Operator
- predicate: floor_day(__time) BETWEEN 1999-11-01 00:00:00.0 AND 1999-11-10 00:00:00.0 (type: boolean)
+ predicate: floor_day(__time) BETWEEN 1999-11-01 00:00:00.0 US/Pacific AND 1999-11-10 00:00:00.0 US/Pacific (type: boolean)
Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
Select Operator
- expressions: robot (type: string), floor_day(__time) (type: timestamp)
+ expressions: robot (type: string), floor_day(__time) (type: timestamp with local time zone)
outputColumnNames: _col0, _col1
Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
Group By Operator
- keys: _col0 (type: string), _col1 (type: timestamp)
+ keys: _col0 (type: string), _col1 (type: timestamp with local time zone)
mode: hash
outputColumnNames: _col0, _col1
Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
Reduce Output Operator
- key expressions: _col0 (type: string), _col1 (type: timestamp)
+ key expressions: _col0 (type: string), _col1 (type: timestamp with local time zone)
sort order: ++
- Map-reduce partition columns: _col0 (type: string), _col1 (type: timestamp)
+ Map-reduce partition columns: _col0 (type: string), _col1 (type: timestamp with local time zone)
Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
TopN Hash Memory Usage: 0.1
Reduce Operator Tree:
Group By Operator
- keys: KEY._col0 (type: string), KEY._col1 (type: timestamp)
+ keys: KEY._col0 (type: string), KEY._col1 (type: timestamp with local time zone)
mode: mergepartial
outputColumnNames: _col0, _col1
Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
@@ -782,10 +782,10 @@ STAGE PLANS:
sort order: +
Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
TopN Hash Memory Usage: 0.1
- value expressions: _col1 (type: timestamp)
+ value expressions: _col1 (type: timestamp with local time zone)
Reduce Operator Tree:
Select Operator
- expressions: KEY.reducesinkkey0 (type: string), VALUE._col0 (type: timestamp)
+ expressions: KEY.reducesinkkey0 (type: string), VALUE._col0 (type: timestamp with local time zone)
outputColumnNames: _col0, _col1
Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
Limit
@@ -831,16 +831,16 @@ STAGE PLANS:
Map Operator Tree:
TableScan
alias: druid_table_1
- filterExpr: floor_day(extract) BETWEEN 1999-11-01 00:00:00.0 AND 1999-11-10 00:00:00.0 (type: boolean)
+ filterExpr: floor_day(extract) BETWEEN 1999-11-01 00:00:00.0 US/Pacific AND 1999-11-10 00:00:00.0 US/Pacific (type: boolean)
properties:
- druid.query.json {"queryType":"groupBy","dataSource":"wikipedia","granularity":"all","dimensions":[{"type":"extraction","dimension":"__time","outputName":"extract","extractionFn":{"type":"timeFormat","format":"yyyy-MM-dd'T'HH:mm:ss.SSS'Z'","timeZone":"UTC"}},{"type":"default","dimension":"robot"}],"limitSpec":{"type":"default"},"aggregations":[{"type":"longSum","name":"dummy_agg","fieldName":"dummy_agg"}],"intervals":["1900-01-01T00:00:00.000/3000-01-01T00:00:00.000"]}
+ druid.query.json {"queryType":"groupBy","dataSource":"wikipedia","granularity":"all","dimensions":[{"type":"extraction","dimension":"__time","outputName":"extract","extractionFn":{"type":"timeFormat","format":"yyyy-MM-dd'T'HH:mm:ss.SSS'Z'","timeZone":"US/Pacific"}},{"type":"default","dimension":"robot"}],"limitSpec":{"type":"default"},"aggregations":[{"type":"longSum","name":"dummy_agg","fieldName":"dummy_agg"}],"intervals":["1900-01-01T00:00:00.000/3000-01-01T00:00:00.000"]}
druid.query.type groupBy
Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
Filter Operator
- predicate: floor_day(extract) BETWEEN 1999-11-01 00:00:00.0 AND 1999-11-10 00:00:00.0 (type: boolean)
+ predicate: floor_day(extract) BETWEEN 1999-11-01 00:00:00.0 US/Pacific AND 1999-11-10 00:00:00.0 US/Pacific (type: boolean)
Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
Select Operator
- expressions: robot (type: string), extract (type: timestamp)
+ expressions: robot (type: string), extract (type: timestamp with local time zone)
outputColumnNames: _col0, _col1
Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
Reduce Output Operator
@@ -848,10 +848,10 @@ STAGE PLANS:
sort order: +
Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
TopN Hash Memory Usage: 0.1
- value expressions: _col1 (type: timestamp)
+ value expressions: _col1 (type: timestamp with local time zone)
Reduce Operator Tree:
Select Operator
- expressions: KEY.reducesinkkey0 (type: string), VALUE._col0 (type: timestamp)
+ expressions: KEY.reducesinkkey0 (type: string), VALUE._col0 (type: timestamp with local time zone)
outputColumnNames: _col0, _col1
Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
Limit
@@ -898,11 +898,11 @@ STAGE PLANS:
TableScan
alias: druid_table_1
properties:
- druid.query.json {"queryType":"groupBy","dataSource":"wikipedia","granularity":"all","dimensions":[{"type":"default","dimension":"robot"},{"type":"extraction","dimension":"__time","outputName":"floor_day","extractionFn":{"type":"timeFormat","format":"yyyy-MM-dd'T'HH:mm:ss.SSS'Z'","granularity":"day","timeZone":"UTC","locale":"en-US"}}],"limitSpec":{"type":"default","limit":10,"columns":[{"dimension":"robot","direction":"ascending","dimensionOrder":"alphanumeric"}]},"aggregations":[{"type":"longSum","name":"dummy_agg","fieldName":"dummy_agg"}],"intervals":["1999-11-01T00:00:00.000/1999-11-10T00:00:00.001"]}
+ druid.query.json {"queryType":"groupBy","dataSource":"wikipedia","granularity":"all","dimensions":[{"type":"default","dimension":"robot"},{"type":"extraction","dimension":"__time","outputName":"floor_day","extractionFn":{"type":"timeFormat","format":"yyyy-MM-dd'T'HH:mm:ss.SSS'Z'","granularity":"day","timeZone":"US/Pacific","locale":"en-US"}}],"limitSpec":{"type":"default","limit":10,"columns":[{"dimension":"robot","direction":"ascending","dimensionOrder":"alphanumeric"}]},"aggregations":[{"type":"longSum","name":"dummy_agg","fieldName":"dummy_agg"}],"intervals":["1999-11-01T08:00:00.000/1999-11-10T08:00:00.001"]}
druid.query.type groupBy
Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
Select Operator
- expressions: robot (type: string), floor_day (type: timestamp)
+ expressions: robot (type: string), floor_day (type: timestamp with local time zone)
outputColumnNames: _col0, _col1
Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
ListSink
@@ -935,20 +935,20 @@ STAGE PLANS:
Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
GatherStats: false
Select Operator
- expressions: __time (type: timestamp), robot (type: string), language (type: string), added (type: float), delta (type: float)
+ expressions: __time (type: timestamp with local time zone), robot (type: string), language (type: string), added (type: float), delta (type: float)
outputColumnNames: __time, robot, language, added, delta
Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
Group By Operator
aggregations: max(added), sum(delta)
- keys: robot (type: string), language (type: string), floor_day(__time) (type: timestamp)
+ keys: robot (type: string), language (type: string), floor_day(__time) (type: timestamp with local time zone)
mode: hash
outputColumnNames: _col0, _col1, _col2, _col3, _col4
Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
Reduce Output Operator
- key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: timestamp)
+ key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: timestamp with local time zone)
null sort order: aaa
sort order: +++
- Map-reduce partition columns: _col0 (type: string), _col1 (type: string), _col2 (type: timestamp)
+ Map-reduce partition columns: _col0 (type: string), _col1 (type: string), _col2 (type: timestamp with local time zone)
Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
tag: -1
value expressions: _col3 (type: float), _col4 (type: double)
@@ -968,14 +968,14 @@ STAGE PLANS:
column.name.delimiter ,
columns __time,robot,namespace,anonymous,unpatrolled,page,language,newpage,user,count,added,delta,variation,deleted
columns.comments 'from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer'
- columns.types timestamp:string:string:string:string:string:string:string:string:float:float:float:float:float
+ columns.types timestamp with local time zone:string:string:string:string:string:string:string:string:float:float:float:float:float
druid.datasource wikipedia
#### A masked pattern was here ####
name default.druid_table_1
numFiles 0
numRows 0
rawDataSize 0
- serialization.ddl struct druid_table_1 { timestamp __time, string robot, string namespace, string anonymous, string unpatrolled, string page, string language, string newpage, string user, float count, float added, float delta, float variation, float deleted}
+ serialization.ddl struct druid_table_1 { timestamp with local time zone __time, string robot, string namespace, string anonymous, string unpatrolled, string page, string language, string newpage, string user, float count, float added, float delta, float variation, float deleted}
serialization.format 1
serialization.lib org.apache.hadoop.hive.druid.QTestDruidSerDe
storage_handler org.apache.hadoop.hive.druid.QTestDruidStorageHandler
@@ -992,14 +992,14 @@ STAGE PLANS:
column.name.delimiter ,
columns __time,robot,namespace,anonymous,unpatrolled,page,language,newpage,user,count,added,delta,variation,deleted
columns.comments 'from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer'
- columns.types timestamp:string:string:string:string:string:string:string:string:float:float:float:float:float
+ columns.types timestamp with local time zone:string:string:string:string:string:string:string:string:float:float:float:float:float
druid.datasource wikipedia
#### A masked pattern was here ####
name default.druid_table_1
numFiles 0
numRows 0
rawDataSize 0
- serialization.ddl struct druid_table_1 { timestamp __time, string robot, string namespace, string anonymous, string unpatrolled, string page, string language, string newpage, string user, float count, float added, float delta, float variation, float deleted}
+ serialization.ddl struct druid_table_1 { timestamp with local time zone __time, string robot, string namespace, string anonymous, string unpatrolled, string page, string language, string newpage, string user, float count, float added, float delta, float variation, float deleted}
serialization.format 1
serialization.lib org.apache.hadoop.hive.druid.QTestDruidSerDe
storage_handler org.apache.hadoop.hive.druid.QTestDruidStorageHandler
@@ -1014,12 +1014,12 @@ STAGE PLANS:
Reduce Operator Tree:
Group By Operator
aggregations: max(VALUE._col0), sum(VALUE._col1)
- keys: KEY._col0 (type: string), KEY._col1 (type: string), KEY._col2 (type: timestamp)
+ keys: KEY._col0 (type: string), KEY._col1 (type: string), KEY._col2 (type: timestamp with local time zone)
mode: mergepartial
outputColumnNames: _col0, _col1, _col2, _col3, _col4
Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
Select Operator
- expressions: _col0 (type: string), _col2 (type: timestamp), _col3 (type: float), _col4 (type: double)
+ expressions: _col0 (type: string), _col2 (type: timestamp with local time zone), _col3 (type: float), _col4 (type: double)
outputColumnNames: _col0, _col1, _col2, _col3
Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
File Output Operator
@@ -1033,7 +1033,7 @@ STAGE PLANS:
properties:
column.name.delimiter ,
columns _col0,_col1,_col2,_col3
- columns.types string,timestamp,float,double
+ columns.types string,timestamp with local time zone,float,double
escape.delim \
serialization.lib org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
@@ -1054,7 +1054,7 @@ STAGE PLANS:
tag: -1
TopN: 10
TopN Hash Memory Usage: 0.1
- value expressions: _col0 (type: string), _col1 (type: timestamp), _col3 (type: double)
+ value expressions: _col0 (type: string), _col1 (type: timestamp with local time zone), _col3 (type: double)
auto parallelism: false
Path -> Alias:
#### A masked pattern was here ####
@@ -1067,7 +1067,7 @@ STAGE PLANS:
properties:
column.name.delimiter ,
columns _col0,_col1,_col2,_col3
- columns.types string,timestamp,float,double
+ columns.types string,timestamp with local time zone,float,double
escape.delim \
serialization.lib org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
@@ -1077,7 +1077,7 @@ STAGE PLANS:
properties:
column.name.delimiter ,
columns _col0,_col1,_col2,_col3
- columns.types string,timestamp,float,double
+ columns.types string,timestamp with local time zone,float,double
escape.delim \
serialization.lib org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
@@ -1086,7 +1086,7 @@ STAGE PLANS:
Needs Tagging: false
Reduce Operator Tree:
Select Operator
- expressions: VALUE._col0 (type: string), VALUE._col1 (type: timestamp), KEY.reducesinkkey1 (type: float), VALUE._col2 (type: double)
+ expressions: VALUE._col0 (type: string), VALUE._col1 (type: timestamp with local time zone), KEY.reducesinkkey1 (type: float), VALUE._col2 (type: double)
outputColumnNames: _col0, _col1, _col2, _col3
Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
Limit
@@ -1104,7 +1104,7 @@ STAGE PLANS:
output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
properties:
columns _col0,_col1,_col2,_col3
- columns.types string:timestamp:float:double
+ columns.types string:timestamp with local time zone:float:double
escape.delim \
hive.serialization.extend.additional.nesting.levels true
serialization.escape.crlf true
http://git-wip-us.apache.org/repos/asf/hive/blob/9975131c/ql/src/test/results/clientpositive/druid_intervals.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/druid_intervals.q.out b/ql/src/test/results/clientpositive/druid_intervals.q.out
index 63f28d5..3cd28b5 100644
--- a/ql/src/test/results/clientpositive/druid_intervals.q.out
+++ b/ql/src/test/results/clientpositive/druid_intervals.q.out
@@ -17,7 +17,7 @@ POSTHOOK: query: DESCRIBE FORMATTED druid_table_1
POSTHOOK: type: DESCTABLE
POSTHOOK: Input: default@druid_table_1
# col_name data_type comment
-__time timestamp from deserializer
+__time timestamp with local time zone from deserializer
robot string from deserializer
namespace string from deserializer
anonymous string from deserializer
@@ -82,7 +82,7 @@ STAGE PLANS:
druid.query.type select
Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
Select Operator
- expressions: __time (type: timestamp)
+ expressions: __time (type: timestamp with local time zone)
outputColumnNames: _col0
Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
ListSink
@@ -108,11 +108,11 @@ STAGE PLANS:
TableScan
alias: druid_table_1
properties:
- druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":false,"intervals":["1900-01-01T00:00:00.000/2012-03-01T00:00:00.000"],"dimensions":[],"metrics":[],"granularity":"all","pagingSpec":{"threshold":16384,"fromNext":true},"context":{"druid.query.fetch":false}}
+ druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":false,"intervals":["1900-01-01T00:00:00.000/2012-03-01T08:00:00.000"],"dimensions":[],"metrics":[],"granularity":"all","pagingSpec":{"threshold":16384,"fromNext":true},"context":{"druid.query.fetch":false}}
druid.query.type select
Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
Select Operator
- expressions: __time (type: timestamp)
+ expressions: __time (type: timestamp with local time zone)
outputColumnNames: _col0
Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
ListSink
@@ -138,11 +138,11 @@ STAGE PLANS:
TableScan
alias: druid_table_1
properties:
- druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":false,"intervals":["2010-01-01T00:00:00.000/2012-03-01T00:00:00.001"],"dimensions":[],"metrics":[],"granularity":"all","pagingSpec":{"threshold":16384,"fromNext":true},"context":{"druid.query.fetch":false}}
+ druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":false,"intervals":["2010-01-01T08:00:00.000/2012-03-01T08:00:00.001"],"dimensions":[],"metrics":[],"granularity":"all","pagingSpec":{"threshold":16384,"fromNext":true},"context":{"druid.query.fetch":false}}
druid.query.type select
Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
Select Operator
- expressions: __time (type: timestamp)
+ expressions: __time (type: timestamp with local time zone)
outputColumnNames: _col0
Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
ListSink
@@ -170,11 +170,11 @@ STAGE PLANS:
TableScan
alias: druid_table_1
properties:
- druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":false,"intervals":["2010-01-01T00:00:00.000/2011-01-01T00:00:00.000"],"dimensions":[],"metrics":[],"granularity":"all","pagingSpec":{"threshold":16384,"fromNext":true},"context":{"druid.query.fetch":false}}
+ druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":false,"intervals":["2010-01-01T08:00:00.000/2011-01-01T08:00:00.000"],"dimensions":[],"metrics":[],"granularity":"all","pagingSpec":{"threshold":16384,"fromNext":true},"context":{"druid.query.fetch":false}}
druid.query.type select
Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
Select Operator
- expressions: __time (type: timestamp)
+ expressions: __time (type: timestamp with local time zone)
outputColumnNames: _col0
Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
ListSink
@@ -200,11 +200,11 @@ STAGE PLANS:
TableScan
alias: druid_table_1
properties:
- druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":false,"intervals":["2010-01-01T00:00:00.000/2011-01-01T00:00:00.001"],"dimensions":[],"metrics":[],"granularity":"all","pagingSpec":{"threshold":16384,"fromNext":true},"context":{"druid.query.fetch":false}}
+ druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":false,"intervals":["2010-01-01T08:00:00.000/2011-01-01T08:00:00.001"],"dimensions":[],"metrics":[],"granularity":"all","pagingSpec":{"threshold":16384,"fromNext":true},"context":{"druid.query.fetch":false}}
druid.query.type select
Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
Select Operator
- expressions: __time (type: timestamp)
+ expressions: __time (type: timestamp with local time zone)
outputColumnNames: _col0
Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
ListSink
@@ -232,11 +232,11 @@ STAGE PLANS:
TableScan
alias: druid_table_1
properties:
- druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":false,"intervals":["2010-01-01T00:00:00.000/2011-01-01T00:00:00.001","2012-01-01T00:00:00.000/2013-01-01T00:00:00.001"],"dimensions":[],"metrics":[],"granularity":"all","pagingSpec":{"threshold":16384,"fromNext":true},"context":{"druid.query.fetch":false}}
+ druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":false,"intervals":["2010-01-01T08:00:00.000/2011-01-01T08:00:00.001","2012-01-01T08:00:00.000/2013-01-01T08:00:00.001"],"dimensions":[],"metrics":[],"granularity":"all","pagingSpec":{"threshold":16384,"fromNext":true},"context":{"druid.query.fetch":false}}
druid.query.type select
Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
Select Operator
- expressions: __time (type: timestamp)
+ expressions: __time (type: timestamp with local time zone)
outputColumnNames: _col0
Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
ListSink
@@ -264,11 +264,11 @@ STAGE PLANS:
TableScan
alias: druid_table_1
properties:
- druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":false,"intervals":["2010-01-01T00:00:00.000/2012-01-01T00:00:00.001"],"dimensions":[],"metrics":[],"granularity":"all","pagingSpec":{"threshold":16384,"fromNext":true},"context":{"druid.query.fetch":false}}
+ druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":false,"intervals":["2010-01-01T08:00:00.000/2012-01-01T08:00:00.001"],"dimensions":[],"metrics":[],"granularity":"all","pagingSpec":{"threshold":16384,"fromNext":true},"context":{"druid.query.fetch":false}}
druid.query.type select
Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
Select Operator
- expressions: __time (type: timestamp)
+ expressions: __time (type: timestamp with local time zone)
outputColumnNames: _col0
Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
ListSink
@@ -294,11 +294,11 @@ STAGE PLANS:
TableScan
alias: druid_table_1
properties:
- druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":false,"intervals":["2010-01-01T00:00:00.000/2010-01-01T00:00:00.001","2011-01-01T00:00:00.000/2011-01-01T00:00:00.001"],"dimensions":[],"metrics":[],"granularity":"all","pagingSpec":{"threshold":16384,"fromNext":true},"context":{"druid.query.fetch":false}}
+ druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":false,"intervals":["2010-01-01T08:00:00.000/2010-01-01T08:00:00.001","2011-01-01T08:00:00.000/2011-01-01T08:00:00.001"],"dimensions":[],"metrics":[],"granularity":"all","pagingSpec":{"threshold":16384,"fromNext":true},"context":{"druid.query.fetch":false}}
druid.query.type select
Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
Select Operator
- expressions: __time (type: timestamp)
+ expressions: __time (type: timestamp with local time zone)
outputColumnNames: _col0
Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
ListSink
@@ -324,11 +324,11 @@ STAGE PLANS:
TableScan
alias: druid_table_1
properties:
- druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":false,"intervals":["2010-01-01T00:00:00.000/2010-01-01T00:00:00.001","2011-01-01T00:00:00.000/2011-01-01T00:00:00.001"],"filter":{"type":"selector","dimension":"robot","value":"user1"},"dimensions":[],"metrics":[],"granularity":"all","pagingSpec":{"threshold":16384,"fromNext":true},"context":{"druid.query.fetch":false}}
+ druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":false,"intervals":["2010-01-01T08:00:00.000/2010-01-01T08:00:00.001","2011-01-01T08:00:00.000/2011-01-01T08:00:00.001"],"filter":{"type":"selector","dimension":"robot","value":"user1"},"dimensions":[],"metrics":[],"granularity":"all","pagingSpec":{"threshold":16384,"fromNext":true},"context":{"druid.query.fetch":false}}
druid.query.type select
Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
Select Operator
- expressions: __time (type: timestamp), 'user1' (type: string)
+ expressions: __time (type: timestamp with local time zone), 'user1' (type: string)
outputColumnNames: _col0, _col1
Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
ListSink
@@ -353,16 +353,16 @@ STAGE PLANS:
Map Operator Tree:
TableScan
alias: druid_table_1
- filterExpr: ((__time) IN (2010-01-01 00:00:00.0, 2011-01-01 00:00:00.0) or (robot = 'user1')) (type: boolean)
+ filterExpr: ((__time) IN (2010-01-01 00:00:00.0 US/Pacific, 2011-01-01 00:00:00.0 US/Pacific) or (robot = 'user1')) (type: boolean)
properties:
druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":false,"intervals":["1900-01-01T00:00:00.000/3000-01-01T00:00:00.000"],"dimensions":["robot"],"metrics":[],"granularity":"all","pagingSpec":{"threshold":16384,"fromNext":true},"context":{"druid.query.fetch":false}}
druid.query.type select
Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
Filter Operator
- predicate: ((__time) IN (2010-01-01 00:00:00.0, 2011-01-01 00:00:00.0) or (robot = 'user1')) (type: boolean)
+ predicate: ((__time) IN (2010-01-01 00:00:00.0 US/Pacific, 2011-01-01 00:00:00.0 US/Pacific) or (robot = 'user1')) (type: boolean)
Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
Select Operator
- expressions: __time (type: timestamp), robot (type: string)
+ expressions: __time (type: timestamp with local time zone), robot (type: string)
outputColumnNames: _col0, _col1
Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
File Output Operator
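The interval endpoints in the druid_intervals plans above shift from T00:00 to T08:00 because timestamp literals are now interpreted in the session time zone while Druid intervals remain in UTC: US/Pacific is UTC-08:00 on these January dates, so 2010-01-01 00:00:00 US/Pacific is the instant 2010-01-01T08:00:00.000Z. A hedged sketch of checking this locally (hive.local.time.zone is assumed here to be the session property governing the new type; it is not shown in this diff):

    -- assumed session property, not part of this commit
    SET hive.local.time.zone=US/Pacific;
    -- the literal below denotes 2010-01-01T08:00:00.000Z, matching the
    -- rewritten Druid interval endpoints above (PST = UTC-08:00)
    SELECT CAST('2010-01-01 00:00:00' AS TIMESTAMP WITH LOCAL TIME ZONE)
    FROM druid_table_1 LIMIT 1;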
http://git-wip-us.apache.org/repos/asf/hive/blob/9975131c/ql/src/test/results/clientpositive/druid_timeseries.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/druid_timeseries.q.out b/ql/src/test/results/clientpositive/druid_timeseries.q.out
index 04db1e0..330c068 100644
--- a/ql/src/test/results/clientpositive/druid_timeseries.q.out
+++ b/ql/src/test/results/clientpositive/druid_timeseries.q.out
@@ -17,7 +17,7 @@ POSTHOOK: query: DESCRIBE FORMATTED druid_table_1
POSTHOOK: type: DESCTABLE
POSTHOOK: Input: default@druid_table_1
# col_name data_type comment
-__time timestamp from deserializer
+__time timestamp with local time zone from deserializer
robot string from deserializer
namespace string from deserializer
anonymous string from deserializer
@@ -108,11 +108,11 @@ STAGE PLANS:
TableScan
alias: druid_table_1
properties:
- druid.query.json {"queryType":"groupBy","dataSource":"wikipedia","granularity":"all","dimensions":[{"type":"extraction","dimension":"__time","outputName":"extract","extractionFn":{"type":"timeFormat","format":"yyyy-MM-dd'T'HH:mm:ss.SSS'Z'","timeZone":"UTC"}}],"limitSpec":{"type":"default"},"aggregations":[{"type":"doubleMax","name":"$f1","fieldName":"added"},{"type":"doubleSum","name":"$f2","fieldName":"variation"}],"intervals":["1900-01-01T00:00:00.000/3000-01-01T00:00:00.000"]}
+ druid.query.json {"queryType":"groupBy","dataSource":"wikipedia","granularity":"all","dimensions":[{"type":"extraction","dimension":"__time","outputName":"extract","extractionFn":{"type":"timeFormat","format":"yyyy-MM-dd'T'HH:mm:ss.SSS'Z'","timeZone":"US/Pacific"}}],"limitSpec":{"type":"default"},"aggregations":[{"type":"doubleMax","name":"$f1","fieldName":"added"},{"type":"doubleSum","name":"$f2","fieldName":"variation"}],"intervals":["1900-01-01T00:00:00.000/3000-01-01T00:00:00.000"]}
druid.query.type groupBy
Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
Select Operator
- expressions: extract (type: timestamp), $f1 (type: float), $f2 (type: float)
+ expressions: extract (type: timestamp with local time zone), $f1 (type: float), $f2 (type: float)
outputColumnNames: _col0, _col1, _col2
Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
ListSink
@@ -142,7 +142,7 @@ STAGE PLANS:
druid.query.type timeseries
Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
Select Operator
- expressions: __time (type: timestamp), $f1 (type: float), $f2 (type: float)
+ expressions: __time (type: timestamp with local time zone), $f1 (type: float), $f2 (type: float)
outputColumnNames: _col0, _col1, _col2
Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
ListSink
@@ -172,7 +172,7 @@ STAGE PLANS:
druid.query.type timeseries
Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
Select Operator
- expressions: __time (type: timestamp), $f1 (type: float), $f2 (type: float)
+ expressions: __time (type: timestamp with local time zone), $f1 (type: float), $f2 (type: float)
outputColumnNames: _col0, _col1, _col2
Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
ListSink
@@ -202,7 +202,7 @@ STAGE PLANS:
druid.query.type timeseries
Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
Select Operator
- expressions: __time (type: timestamp), $f1 (type: float), $f2 (type: float)
+ expressions: __time (type: timestamp with local time zone), $f1 (type: float), $f2 (type: float)
outputColumnNames: _col0, _col1, _col2
Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
ListSink
@@ -232,7 +232,7 @@ STAGE PLANS:
druid.query.type timeseries
Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
Select Operator
- expressions: __time (type: timestamp), $f1 (type: float), $f2 (type: float)
+ expressions: __time (type: timestamp with local time zone), $f1 (type: float), $f2 (type: float)
outputColumnNames: _col0, _col1, _col2
Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
ListSink
@@ -262,7 +262,7 @@ STAGE PLANS:
druid.query.type timeseries
Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
Select Operator
- expressions: __time (type: timestamp), $f1 (type: float), $f2 (type: float)
+ expressions: __time (type: timestamp with local time zone), $f1 (type: float), $f2 (type: float)
outputColumnNames: _col0, _col1, _col2
Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
ListSink
@@ -292,7 +292,7 @@ STAGE PLANS:
druid.query.type timeseries
Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
Select Operator
- expressions: __time (type: timestamp), $f1 (type: float), $f2 (type: float)
+ expressions: __time (type: timestamp with local time zone), $f1 (type: float), $f2 (type: float)
outputColumnNames: _col0, _col1, _col2
Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
ListSink
@@ -322,7 +322,7 @@ STAGE PLANS:
druid.query.type timeseries
Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
Select Operator
- expressions: __time (type: timestamp), $f1 (type: float), $f2 (type: float)
+ expressions: __time (type: timestamp with local time zone), $f1 (type: float), $f2 (type: float)
outputColumnNames: _col0, _col1, _col2
Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
ListSink
@@ -352,7 +352,7 @@ STAGE PLANS:
druid.query.type timeseries
Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
Select Operator
- expressions: __time (type: timestamp), $f1 (type: float), $f2 (type: float)
+ expressions: __time (type: timestamp with local time zone), $f1 (type: float), $f2 (type: float)
outputColumnNames: _col0, _col1, _col2
Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
ListSink
@@ -384,7 +384,7 @@ STAGE PLANS:
druid.query.type timeseries
Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
Select Operator
- expressions: __time (type: timestamp), $f1 (type: float), $f2 (type: float)
+ expressions: __time (type: timestamp with local time zone), $f1 (type: float), $f2 (type: float)
outputColumnNames: _col0, _col1, _col2
Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
ListSink
@@ -393,16 +393,16 @@ PREHOOK: query: EXPLAIN
SELECT floor_hour(`__time`), max(added), sum(variation)
FROM druid_table_1
WHERE floor_hour(`__time`)
- BETWEEN CAST('2010-01-01 00:00:00' AS TIMESTAMP)
- AND CAST('2014-01-01 00:00:00' AS TIMESTAMP)
+ BETWEEN CAST('2010-01-01 00:00:00' AS TIMESTAMP WITH LOCAL TIME ZONE)
+ AND CAST('2014-01-01 00:00:00' AS TIMESTAMP WITH LOCAL TIME ZONE)
GROUP BY floor_hour(`__time`)
PREHOOK: type: QUERY
POSTHOOK: query: EXPLAIN
SELECT floor_hour(`__time`), max(added), sum(variation)
FROM druid_table_1
WHERE floor_hour(`__time`)
- BETWEEN CAST('2010-01-01 00:00:00' AS TIMESTAMP)
- AND CAST('2014-01-01 00:00:00' AS TIMESTAMP)
+ BETWEEN CAST('2010-01-01 00:00:00' AS TIMESTAMP WITH LOCAL TIME ZONE)
+ AND CAST('2014-01-01 00:00:00' AS TIMESTAMP WITH LOCAL TIME ZONE)
GROUP BY floor_hour(`__time`)
POSTHOOK: type: QUERY
STAGE DEPENDENCIES:
@@ -415,34 +415,34 @@ STAGE PLANS:
Map Operator Tree:
TableScan
alias: druid_table_1
- filterExpr: floor_hour(__time) BETWEEN 2010-01-01 00:00:00.0 AND 2014-01-01 00:00:00.0 (type: boolean)
+ filterExpr: floor_hour(__time) BETWEEN 2010-01-01 00:00:00.0 US/Pacific AND 2014-01-01 00:00:00.0 US/Pacific (type: boolean)
properties:
druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":false,"intervals":["1900-01-01T00:00:00.000/3000-01-01T00:00:00.000"],"dimensions":[],"metrics":["added","variation"],"granularity":"all","pagingSpec":{"threshold":16384,"fromNext":true},"context":{"druid.query.fetch":false}}
druid.query.type select
Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
Filter Operator
- predicate: floor_hour(__time) BETWEEN 2010-01-01 00:00:00.0 AND 2014-01-01 00:00:00.0 (type: boolean)
+ predicate: floor_hour(__time) BETWEEN 2010-01-01 00:00:00.0 US/Pacific AND 2014-01-01 00:00:00.0 US/Pacific (type: boolean)
Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
Select Operator
- expressions: floor_hour(__time) (type: timestamp), added (type: float), variation (type: float)
+ expressions: floor_hour(__time) (type: timestamp with local time zone), added (type: float), variation (type: float)
outputColumnNames: _col0, _col1, _col2
Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
Group By Operator
aggregations: max(_col1), sum(_col2)
- keys: _col0 (type: timestamp)
+ keys: _col0 (type: timestamp with local time zone)
mode: hash
outputColumnNames: _col0, _col1, _col2
Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
Reduce Output Operator
- key expressions: _col0 (type: timestamp)
+ key expressions: _col0 (type: timestamp with local time zone)
sort order: +
- Map-reduce partition columns: _col0 (type: timestamp)
+ Map-reduce partition columns: _col0 (type: timestamp with local time zone)
Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
value expressions: _col1 (type: float), _col2 (type: double)
Reduce Operator Tree:
Group By Operator
aggregations: max(VALUE._col0), sum(VALUE._col1)
- keys: KEY._col0 (type: timestamp)
+ keys: KEY._col0 (type: timestamp with local time zone)
mode: mergepartial
outputColumnNames: _col0, _col1, _col2
Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
@@ -468,8 +468,8 @@ FROM
FROM druid_table_1
GROUP BY floor_hour(`__time`)
) subq
-WHERE subq.h BETWEEN CAST('2010-01-01 00:00:00' AS TIMESTAMP)
- AND CAST('2014-01-01 00:00:00' AS TIMESTAMP)
+WHERE subq.h BETWEEN CAST('2010-01-01 00:00:00' AS TIMESTAMP WITH LOCAL TIME ZONE)
+ AND CAST('2014-01-01 00:00:00' AS TIMESTAMP WITH LOCAL TIME ZONE)
PREHOOK: type: QUERY
POSTHOOK: query: EXPLAIN
SELECT subq.h, subq.m, subq.s
@@ -479,8 +479,8 @@ FROM
FROM druid_table_1
GROUP BY floor_hour(`__time`)
) subq
-WHERE subq.h BETWEEN CAST('2010-01-01 00:00:00' AS TIMESTAMP)
- AND CAST('2014-01-01 00:00:00' AS TIMESTAMP)
+WHERE subq.h BETWEEN CAST('2010-01-01 00:00:00' AS TIMESTAMP WITH LOCAL TIME ZONE)
+ AND CAST('2014-01-01 00:00:00' AS TIMESTAMP WITH LOCAL TIME ZONE)
POSTHOOK: type: QUERY
STAGE DEPENDENCIES:
Stage-1 is a root stage
@@ -492,34 +492,34 @@ STAGE PLANS:
Map Operator Tree:
TableScan
alias: druid_table_1
- filterExpr: floor_hour(__time) BETWEEN 2010-01-01 00:00:00.0 AND 2014-01-01 00:00:00.0 (type: boolean)
+ filterExpr: floor_hour(__time) BETWEEN 2010-01-01 00:00:00.0 US/Pacific AND 2014-01-01 00:00:00.0 US/Pacific (type: boolean)
properties:
druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":false,"intervals":["1900-01-01T00:00:00.000/3000-01-01T00:00:00.000"],"dimensions":[],"metrics":["added","variation"],"granularity":"all","pagingSpec":{"threshold":16384,"fromNext":true},"context":{"druid.query.fetch":false}}
druid.query.type select
Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
Filter Operator
- predicate: floor_hour(__time) BETWEEN 2010-01-01 00:00:00.0 AND 2014-01-01 00:00:00.0 (type: boolean)
+ predicate: floor_hour(__time) BETWEEN 2010-01-01 00:00:00.0 US/Pacific AND 2014-01-01 00:00:00.0 US/Pacific (type: boolean)
Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
Select Operator
- expressions: floor_hour(__time) (type: timestamp), added (type: float), variation (type: float)
+ expressions: floor_hour(__time) (type: timestamp with local time zone), added (type: float), variation (type: float)
outputColumnNames: _col0, _col1, _col2
Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
Group By Operator
aggregations: max(_col1), sum(_col2)
- keys: _col0 (type: timestamp)
+ keys: _col0 (type: timestamp with local time zone)
mode: hash
outputColumnNames: _col0, _col1, _col2
Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
Reduce Output Operator
- key expressions: _col0 (type: timestamp)
+ key expressions: _col0 (type: timestamp with local time zone)
sort order: +
- Map-reduce partition columns: _col0 (type: timestamp)
+ Map-reduce partition columns: _col0 (type: timestamp with local time zone)
Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
value expressions: _col1 (type: float), _col2 (type: double)
Reduce Operator Tree:
Group By Operator
aggregations: max(VALUE._col0), sum(VALUE._col1)
- keys: KEY._col0 (type: timestamp)
+ keys: KEY._col0 (type: timestamp with local time zone)
mode: mergepartial
outputColumnNames: _col0, _col1, _col2
Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
http://git-wip-us.apache.org/repos/asf/hive/blob/9975131c/ql/src/test/results/clientpositive/druid_topn.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/druid_topn.q.out b/ql/src/test/results/clientpositive/druid_topn.q.out
index 5c31f85..3e2b477 100644
--- a/ql/src/test/results/clientpositive/druid_topn.q.out
+++ b/ql/src/test/results/clientpositive/druid_topn.q.out
@@ -17,7 +17,7 @@ POSTHOOK: query: DESCRIBE FORMATTED druid_table_1
POSTHOOK: type: DESCTABLE
POSTHOOK: Input: default@druid_table_1
# col_name data_type comment
-__time timestamp from deserializer
+__time timestamp with local time zone from deserializer
robot string from deserializer
namespace string from deserializer
anonymous string from deserializer
@@ -118,11 +118,11 @@ STAGE PLANS:
TableScan
alias: druid_table_1
properties:
- druid.query.json {"queryType":"groupBy","dataSource":"wikipedia","granularity":"all","dimensions":[{"type":"extraction","dimension":"__time","outputName":"extract","extractionFn":{"type":"timeFormat","format":"yyyy-MM-dd'T'HH:mm:ss.SSS'Z'","timeZone":"UTC"}},{"type":"default","dimension":"robot"}],"limitSpec":{"type":"default","limit":100,"columns":[{"dimension":"$f3","direction":"descending","dimensionOrder":"numeric"}]},"aggregations":[{"type":"doubleMax","name":"$f2","fieldName":"added"},{"type":"doubleSum","name":"$f3","fieldName":"variation"}],"intervals":["1900-01-01T00:00:00.000/3000-01-01T00:00:00.000"]}
+ druid.query.json {"queryType":"groupBy","dataSource":"wikipedia","granularity":"all","dimensions":[{"type":"extraction","dimension":"__time","outputName":"extract","extractionFn":{"type":"timeFormat","format":"yyyy-MM-dd'T'HH:mm:ss.SSS'Z'","timeZone":"US/Pacific"}},{"type":"default","dimension":"robot"}],"limitSpec":{"type":"default","limit":100,"columns":[{"dimension":"$f3","direction":"descending","dimensionOrder":"numeric"}]},"aggregations":[{"type":"doubleMax","name":"$f2","fieldName":"added"},{"type":"doubleSum","name":"$f3","fieldName":"variation"}],"intervals":["1900-01-01T00:00:00.000/3000-01-01T00:00:00.000"]}
druid.query.type groupBy
Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
Select Operator
- expressions: robot (type: string), extract (type: timestamp), $f2 (type: float), $f3 (type: float)
+ expressions: robot (type: string), extract (type: timestamp with local time zone), $f2 (type: float), $f3 (type: float)
outputColumnNames: _col0, _col1, _col2, _col3
Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
ListSink
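The only change in this groupBy translation is the "timeZone" field of the
timeFormat extraction function: it now follows the session time zone
instead of being hard-coded to UTC, so time extraction and flooring happen
in US/Pacific on the Druid side. A hedged sketch of how the session zone
feeds the generated query; the hive.local.time.zone property name is an
assumption taken from the surrounding Hive work, not from this diff:

  -- Changing the session zone changes the "timeZone" emitted into the
  -- Druid extraction functions shown in druid.query.json above.
  SET hive.local.time.zone=US/Pacific;
  EXPLAIN
  SELECT robot, `__time`, max(added), sum(variation)
  FROM druid_table_1
  GROUP BY robot, `__time`;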
@@ -152,11 +152,11 @@ STAGE PLANS:
TableScan
alias: druid_table_1
properties:
- druid.query.json {"queryType":"groupBy","dataSource":"wikipedia","granularity":"all","dimensions":[{"type":"default","dimension":"robot"},{"type":"extraction","dimension":"__time","outputName":"floor_year","extractionFn":{"type":"timeFormat","format":"yyyy-MM-dd'T'HH:mm:ss.SSS'Z'","granularity":"year","timeZone":"UTC","locale":"en-US"}}],"limitSpec":{"type":"default","limit":10,"columns":[{"dimension":"$f3","direction":"descending","dimensionOrder":"numeric"}]},"aggregations":[{"type":"doubleMax","name":"$f2","fieldName":"added"},{"type":"doubleSum","name":"$f3","fieldName":"variation"}],"intervals":["1900-01-01T00:00:00.000/3000-01-01T00:00:00.000"]}
+ druid.query.json {"queryType":"groupBy","dataSource":"wikipedia","granularity":"all","dimensions":[{"type":"default","dimension":"robot"},{"type":"extraction","dimension":"__time","outputName":"floor_year","extractionFn":{"type":"timeFormat","format":"yyyy-MM-dd'T'HH:mm:ss.SSS'Z'","granularity":"year","timeZone":"US/Pacific","locale":"en-US"}}],"limitSpec":{"type":"default","limit":10,"columns":[{"dimension":"$f3","direction":"descending","dimensionOrder":"numeric"}]},"aggregations":[{"type":"doubleMax","name":"$f2","fieldName":"added"},{"type":"doubleSum","name":"$f3","fieldName":"variation"}],"intervals":["1900-01-01T00:00:00.000/3000-01-01T00:00:00.000"]}
druid.query.type groupBy
Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
Select Operator
- expressions: robot (type: string), floor_year (type: timestamp), $f2 (type: float), $f3 (type: float)
+ expressions: robot (type: string), floor_year (type: timestamp with local time zone), $f2 (type: float), $f3 (type: float)
outputColumnNames: _col0, _col1, _col2, _col3
Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
ListSink
@@ -186,11 +186,11 @@ STAGE PLANS:
TableScan
alias: druid_table_1
properties:
- druid.query.json {"queryType":"groupBy","dataSource":"wikipedia","granularity":"all","dimensions":[{"type":"default","dimension":"robot"},{"type":"extraction","dimension":"__time","outputName":"floor_month","extractionFn":{"type":"timeFormat","format":"yyyy-MM-dd'T'HH:mm:ss.SSS'Z'","granularity":"month","timeZone":"UTC","locale":"en-US"}}],"limitSpec":{"type":"default","limit":10,"columns":[{"dimension":"$f3","direction":"ascending","dimensionOrder":"numeric"}]},"aggregations":[{"type":"doubleMax","name":"$f2","fieldName":"added"},{"type":"doubleSum","name":"$f3","fieldName":"variation"}],"intervals":["1900-01-01T00:00:00.000/3000-01-01T00:00:00.000"]}
+ druid.query.json {"queryType":"groupBy","dataSource":"wikipedia","granularity":"all","dimensions":[{"type":"default","dimension":"robot"},{"type":"extraction","dimension":"__time","outputName":"floor_month","extractionFn":{"type":"timeFormat","format":"yyyy-MM-dd'T'HH:mm:ss.SSS'Z'","granularity":"month","timeZone":"US/Pacific","locale":"en-US"}}],"limitSpec":{"type":"default","limit":10,"columns":[{"dimension":"$f3","direction":"ascending","dimensionOrder":"numeric"}]},"aggregations":[{"type":"doubleMax","name":"$f2","fieldName":"added"},{"type":"doubleSum","name":"$f3","fieldName":"variation"}],"intervals":["1900-01-01T00:00:00.000/3000-01-01T00:00:00.000"]}
druid.query.type groupBy
Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
Select Operator
- expressions: robot (type: string), floor_month (type: timestamp), $f2 (type: float), $f3 (type: float)
+ expressions: robot (type: string), floor_month (type: timestamp with local time zone), $f2 (type: float), $f3 (type: float)
outputColumnNames: _col0, _col1, _col2, _col3
Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
ListSink
@@ -220,11 +220,11 @@ STAGE PLANS:
TableScan
alias: druid_table_1
properties:
- druid.query.json {"queryType":"groupBy","dataSource":"wikipedia","granularity":"all","dimensions":[{"type":"default","dimension":"robot"},{"type":"default","dimension":"namespace"},{"type":"extraction","dimension":"__time","outputName":"floor_month","extractionFn":{"type":"timeFormat","format":"yyyy-MM-dd'T'HH:mm:ss.SSS'Z'","granularity":"month","timeZone":"UTC","locale":"en-US"}}],"limitSpec":{"type":"default","limit":10,"columns":[{"dimension":"$f4","direction":"descending","dimensionOrder":"numeric"},{"dimension":"$f3","direction":"descending","dimensionOrder":"numeric"}]},"aggregations":[{"type":"doubleMax","name":"$f3","fieldName":"added"},{"type":"doubleSum","name":"$f4","fieldName":"variation"}],"intervals":["1900-01-01T00:00:00.000/3000-01-01T00:00:00.000"]}
+ druid.query.json {"queryType":"groupBy","dataSource":"wikipedia","granularity":"all","dimensions":[{"type":"default","dimension":"robot"},{"type":"default","dimension":"namespace"},{"type":"extraction","dimension":"__time","outputName":"floor_month","extractionFn":{"type":"timeFormat","format":"yyyy-MM-dd'T'HH:mm:ss.SSS'Z'","granularity":"month","timeZone":"US/Pacific","locale":"en-US"}}],"limitSpec":{"type":"default","limit":10,"columns":[{"dimension":"$f4","direction":"descending","dimensionOrder":"numeric"},{"dimension":"$f3","direction":"descending","dimensionOrder":"numeric"}]},"aggregations":[{"type":"doubleMax","name":"$f3","fieldName":"added"},{"type":"doubleSum","name":"$f4","fieldName":"variation"}],"intervals":["1900-01-01T00:00:00.000/3000-01-01T00:00:00.000"]}
druid.query.type groupBy
Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
Select Operator
- expressions: robot (type: string), floor_month (type: timestamp), $f3 (type: float), $f4 (type: float)
+ expressions: robot (type: string), floor_month (type: timestamp with local time zone), $f3 (type: float), $f4 (type: float)
outputColumnNames: _col0, _col1, _col2, _col3
Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
ListSink
@@ -254,11 +254,11 @@ STAGE PLANS:
TableScan
alias: druid_table_1
properties:
- druid.query.json {"queryType":"groupBy","dataSource":"wikipedia","granularity":"all","dimensions":[{"type":"default","dimension":"robot"},{"type":"default","dimension":"namespace"},{"type":"extraction","dimension":"__time","outputName":"floor_month","extractionFn":{"type":"timeFormat","format":"yyyy-MM-dd'T'HH:mm:ss.SSS'Z'","granularity":"month","timeZone":"UTC","locale":"en-US"}}],"limitSpec":{"type":"default","limit":10,"columns":[{"dimension":"robot","direction":"ascending","dimensionOrder":"alphanumeric"},{"dimension":"$f3","direction":"descending","dimensionOrder":"numeric"}]},"aggregations":[{"type":"doubleMax","name":"$f3","fieldName":"added"},{"type":"doubleSum","name":"$f4","fieldName":"variation"}],"intervals":["1900-01-01T00:00:00.000/3000-01-01T00:00:00.000"]}
+ druid.query.json {"queryType":"groupBy","dataSource":"wikipedia","granularity":"all","dimensions":[{"type":"default","dimension":"robot"},{"type":"default","dimension":"namespace"},{"type":"extraction","dimension":"__time","outputName":"floor_month","extractionFn":{"type":"timeFormat","format":"yyyy-MM-dd'T'HH:mm:ss.SSS'Z'","granularity":"month","timeZone":"US/Pacific","locale":"en-US"}}],"limitSpec":{"type":"default","limit":10,"columns":[{"dimension":"robot","direction":"ascending","dimensionOrder":"alphanumeric"},{"dimension":"$f3","direction":"descending","dimensionOrder":"numeric"}]},"aggregations":[{"type":"doubleMax","name":"$f3","fieldName":"added"},{"type":"doubleSum","name":"$f4","fieldName":"variation"}],"intervals":["1900-01-01T00:00:00.000/3000-01-01T00:00:00.000"]}
druid.query.type groupBy
Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
Select Operator
- expressions: robot (type: string), floor_month (type: timestamp), $f3 (type: float), $f4 (type: float)
+ expressions: robot (type: string), floor_month (type: timestamp with local time zone), $f3 (type: float), $f4 (type: float)
outputColumnNames: _col0, _col1, _col2, _col3
Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
ListSink
@@ -294,7 +294,7 @@ STAGE PLANS:
druid.query.type timeseries
Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
Select Operator
- expressions: __time (type: timestamp), $f1_0 (type: float), $f2 (type: float)
+ expressions: __time (type: timestamp with local time zone), $f1_0 (type: float), $f2 (type: float)
outputColumnNames: _col0, _col1, _col2
Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
Reduce Output Operator
@@ -302,17 +302,17 @@ STAGE PLANS:
sort order: +
Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
TopN Hash Memory Usage: 0.1
- value expressions: _col0 (type: timestamp), _col1 (type: float)
+ value expressions: _col0 (type: timestamp with local time zone), _col1 (type: float)
Reduce Operator Tree:
Select Operator
- expressions: VALUE._col0 (type: timestamp), VALUE._col1 (type: float), KEY.reducesinkkey0 (type: float)
+ expressions: VALUE._col0 (type: timestamp with local time zone), VALUE._col1 (type: float), KEY.reducesinkkey0 (type: float)
outputColumnNames: _col0, _col1, _col2
Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
Limit
Number of rows: 10
Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
Select Operator
- expressions: '1' (type: string), _col0 (type: timestamp), _col1 (type: float), _col2 (type: float)
+ expressions: '1' (type: string), _col0 (type: timestamp with local time zone), _col1 (type: float), _col2 (type: float)
outputColumnNames: _col0, _col1, _col2, _col3
Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
File Output Operator
@@ -333,8 +333,8 @@ PREHOOK: query: EXPLAIN
SELECT robot, floor_hour(`__time`), max(added) as m, sum(variation)
FROM druid_table_1
WHERE floor_hour(`__time`)
- BETWEEN CAST('2010-01-01 00:00:00' AS TIMESTAMP)
- AND CAST('2014-01-01 00:00:00' AS TIMESTAMP)
+ BETWEEN CAST('2010-01-01 00:00:00' AS TIMESTAMP WITH LOCAL TIME ZONE)
+ AND CAST('2014-01-01 00:00:00' AS TIMESTAMP WITH LOCAL TIME ZONE)
GROUP BY robot, floor_hour(`__time`)
ORDER BY m
LIMIT 100
@@ -343,8 +343,8 @@ POSTHOOK: query: EXPLAIN
SELECT robot, floor_hour(`__time`), max(added) as m, sum(variation)
FROM druid_table_1
WHERE floor_hour(`__time`)
- BETWEEN CAST('2010-01-01 00:00:00' AS TIMESTAMP)
- AND CAST('2014-01-01 00:00:00' AS TIMESTAMP)
+ BETWEEN CAST('2010-01-01 00:00:00' AS TIMESTAMP WITH LOCAL TIME ZONE)
+ AND CAST('2014-01-01 00:00:00' AS TIMESTAMP WITH LOCAL TIME ZONE)
GROUP BY robot, floor_hour(`__time`)
ORDER BY m
LIMIT 100
@@ -360,34 +360,34 @@ STAGE PLANS:
Map Operator Tree:
TableScan
alias: druid_table_1
- filterExpr: floor_hour(__time) BETWEEN 2010-01-01 00:00:00.0 AND 2014-01-01 00:00:00.0 (type: boolean)
+ filterExpr: floor_hour(__time) BETWEEN 2010-01-01 00:00:00.0 US/Pacific AND 2014-01-01 00:00:00.0 US/Pacific (type: boolean)
properties:
druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":false,"intervals":["1900-01-01T00:00:00.000/3000-01-01T00:00:00.000"],"dimensions":["robot"],"metrics":["added","variation"],"granularity":"all","pagingSpec":{"threshold":16384,"fromNext":true},"context":{"druid.query.fetch":false}}
druid.query.type select
Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
Filter Operator
- predicate: floor_hour(__time) BETWEEN 2010-01-01 00:00:00.0 AND 2014-01-01 00:00:00.0 (type: boolean)
+ predicate: floor_hour(__time) BETWEEN 2010-01-01 00:00:00.0 US/Pacific AND 2014-01-01 00:00:00.0 US/Pacific (type: boolean)
Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
Select Operator
- expressions: robot (type: string), floor_hour(__time) (type: timestamp), added (type: float), variation (type: float)
+ expressions: robot (type: string), floor_hour(__time) (type: timestamp with local time zone), added (type: float), variation (type: float)
outputColumnNames: _col0, _col1, _col2, _col3
Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
Group By Operator
aggregations: max(_col2), sum(_col3)
- keys: _col0 (type: string), _col1 (type: timestamp)
+ keys: _col0 (type: string), _col1 (type: timestamp with local time zone)
mode: hash
outputColumnNames: _col0, _col1, _col2, _col3
Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
Reduce Output Operator
- key expressions: _col0 (type: string), _col1 (type: timestamp)
+ key expressions: _col0 (type: string), _col1 (type: timestamp with local time zone)
sort order: ++
- Map-reduce partition columns: _col0 (type: string), _col1 (type: timestamp)
+ Map-reduce partition columns: _col0 (type: string), _col1 (type: timestamp with local time zone)
Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
value expressions: _col2 (type: float), _col3 (type: double)
Reduce Operator Tree:
Group By Operator
aggregations: max(VALUE._col0), sum(VALUE._col1)
- keys: KEY._col0 (type: string), KEY._col1 (type: timestamp)
+ keys: KEY._col0 (type: string), KEY._col1 (type: timestamp with local time zone)
mode: mergepartial
outputColumnNames: _col0, _col1, _col2, _col3
Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
@@ -407,10 +407,10 @@ STAGE PLANS:
sort order: +
Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
TopN Hash Memory Usage: 0.1
- value expressions: _col0 (type: string), _col1 (type: timestamp), _col3 (type: double)
+ value expressions: _col0 (type: string), _col1 (type: timestamp with local time zone), _col3 (type: double)
Reduce Operator Tree:
Select Operator
- expressions: VALUE._col0 (type: string), VALUE._col1 (type: timestamp), KEY.reducesinkkey0 (type: float), VALUE._col2 (type: double)
+ expressions: VALUE._col0 (type: string), VALUE._col1 (type: timestamp with local time zone), KEY.reducesinkkey0 (type: float), VALUE._col2 (type: double)
outputColumnNames: _col0, _col1, _col2, _col3
Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
Limit
http://git-wip-us.apache.org/repos/asf/hive/blob/9975131c/ql/src/test/results/clientpositive/groupby_grouping_sets1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/groupby_grouping_sets1.q.out b/ql/src/test/results/clientpositive/groupby_grouping_sets1.q.out
index 13eac20..fcf203e 100644
--- a/ql/src/test/results/clientpositive/groupby_grouping_sets1.q.out
+++ b/ql/src/test/results/clientpositive/groupby_grouping_sets1.q.out
@@ -464,22 +464,21 @@ STAGE PLANS:
outputColumnNames: a
Statistics: Num rows: 1 Data size: 36 Basic stats: COMPLETE Column stats: NONE
Group By Operator
- keys: a (type: string), 0 (type: int)
+ keys: a (type: string)
mode: hash
- outputColumnNames: _col0, _col1
+ outputColumnNames: _col0
Statistics: Num rows: 1 Data size: 36 Basic stats: COMPLETE Column stats: NONE
Reduce Output Operator
- key expressions: _col0 (type: string), _col1 (type: int)
- sort order: ++
- Map-reduce partition columns: _col0 (type: string), _col1 (type: int)
+ key expressions: _col0 (type: string)
+ sort order: +
+ Map-reduce partition columns: _col0 (type: string)
Statistics: Num rows: 1 Data size: 36 Basic stats: COMPLETE Column stats: NONE
Reduce Operator Tree:
Group By Operator
- keys: KEY._col0 (type: string), KEY._col1 (type: int)
+ keys: KEY._col0 (type: string)
mode: mergepartial
outputColumnNames: _col0
Statistics: Num rows: 1 Data size: 36 Basic stats: COMPLETE Column stats: NONE
- pruneGroupingSetId: true
File Output Operator
compressed: false
Statistics: Num rows: 1 Data size: 36 Basic stats: COMPLETE Column stats: NONE
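This hunk reflects a Calcite 1.14 simplification: when GROUPING SETS
degenerates to a single set, the planner no longer materializes the
constant grouping id, so the aggregation keys drop the synthetic
0 (type: int) column and the pruneGroupingSetId step becomes unnecessary.
A sketch of the affected query shape, with table and column names assumed
from the test name rather than shown in this diff:

  -- A single grouping set is equivalent to a plain GROUP BY a, so no
  -- grouping id needs to be carried through the aggregation.
  SELECT a FROM t1 GROUP BY a GROUPING SETS (a);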
@@ -532,35 +531,30 @@ STAGE PLANS:
Statistics: Num rows: 1 Data size: 36 Basic stats: COMPLETE Column stats: NONE
Group By Operator
aggregations: count()
- keys: _col0 (type: double), 0 (type: int)
+ keys: _col0 (type: double)
mode: hash
- outputColumnNames: _col0, _col1, _col2
+ outputColumnNames: _col0, _col1
Statistics: Num rows: 1 Data size: 36 Basic stats: COMPLETE Column stats: NONE
Reduce Output Operator
- key expressions: _col0 (type: double), _col1 (type: int)
- sort order: ++
- Map-reduce partition columns: _col0 (type: double), _col1 (type: int)
+ key expressions: _col0 (type: double)
+ sort order: +
+ Map-reduce partition columns: _col0 (type: double)
Statistics: Num rows: 1 Data size: 36 Basic stats: COMPLETE Column stats: NONE
- value expressions: _col2 (type: bigint)
+ value expressions: _col1 (type: bigint)
Reduce Operator Tree:
Group By Operator
aggregations: count(VALUE._col0)
- keys: KEY._col0 (type: double), KEY._col1 (type: int)
+ keys: KEY._col0 (type: double)
mode: mergepartial
- outputColumnNames: _col0, _col2
+ outputColumnNames: _col0, _col1
Statistics: Num rows: 1 Data size: 36 Basic stats: COMPLETE Column stats: NONE
- pruneGroupingSetId: true
- Select Operator
- expressions: _col0 (type: double), _col2 (type: bigint)
- outputColumnNames: _col0, _col1
+ File Output Operator
+ compressed: false
Statistics: Num rows: 1 Data size: 36 Basic stats: COMPLETE Column stats: NONE
- File Output Operator
- compressed: false
- Statistics: Num rows: 1 Data size: 36 Basic stats: COMPLETE Column stats: NONE
- table:
- input format: org.apache.hadoop.mapred.SequenceFileInputFormat
- output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
- serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ table:
+ input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
Stage: Stage-0
Fetch Operator
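The second hunk shows a knock-on effect of the same simplification: with
the constant grouping id gone, the mergepartial Group By already emits
(_col0, _col1) in final order, so the column-reordering Select Operator
between it and the File Output Operator disappears as well. Assumed query
shape for this plan, inferred from the double-typed key seen above:

  -- count(*) over a single grouping set; the final plan writes the
  -- aggregation output directly without an intermediate projection.
  SELECT a + b, count(*)
  FROM t1
  GROUP BY a + b GROUPING SETS (a + b);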