Posted to commits@hive.apache.org by jc...@apache.org on 2016/09/08 08:45:57 UTC

[2/6] hive git commit: HIVE-14217: Druid integration (Jesus Camacho Rodriguez, reviewed by Ashutosh Chauhan)

http://git-wip-us.apache.org/repos/asf/hive/blob/58d1befa/ql/src/test/results/clientpositive/druid_basic2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/druid_basic2.q.out b/ql/src/test/results/clientpositive/druid_basic2.q.out
new file mode 100644
index 0000000..3205905
--- /dev/null
+++ b/ql/src/test/results/clientpositive/druid_basic2.q.out
@@ -0,0 +1,533 @@
+PREHOOK: query: CREATE EXTERNAL TABLE druid_table_1
+STORED BY 'org.apache.hadoop.hive.druid.QTestDruidStorageHandler'
+TBLPROPERTIES ("druid.datasource" = "wikipedia")
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@druid_table_1
+POSTHOOK: query: CREATE EXTERNAL TABLE druid_table_1
+STORED BY 'org.apache.hadoop.hive.druid.QTestDruidStorageHandler'
+TBLPROPERTIES ("druid.datasource" = "wikipedia")
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@druid_table_1
+PREHOOK: query: DESCRIBE FORMATTED druid_table_1
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@druid_table_1
+POSTHOOK: query: DESCRIBE FORMATTED druid_table_1
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@druid_table_1
+# col_name            	data_type           	comment             
+	 	 
+__time              	timestamp           	from deserializer   
+robot               	string              	from deserializer   
+namespace           	string              	from deserializer   
+anonymous           	string              	from deserializer   
+unpatrolled         	string              	from deserializer   
+page                	string              	from deserializer   
+language            	string              	from deserializer   
+newpage             	string              	from deserializer   
+user                	string              	from deserializer   
+count               	float               	from deserializer   
+added               	float               	from deserializer   
+delta               	float               	from deserializer   
+variation           	float               	from deserializer   
+deleted             	float               	from deserializer   
+	 	 
+# Detailed Table Information	 	 
+Database:           	default             	 
+#### A masked pattern was here ####
+Retention:          	0                   	 
+#### A masked pattern was here ####
+Table Type:         	EXTERNAL_TABLE      	 
+Table Parameters:	 	 
+	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\"}
+	EXTERNAL            	TRUE                
+	druid.datasource    	wikipedia           
+	numFiles            	0                   
+	numRows             	0                   
+	rawDataSize         	0                   
+	storage_handler     	org.apache.hadoop.hive.druid.QTestDruidStorageHandler
+	totalSize           	0                   
+#### A masked pattern was here ####
+	 	 
+# Storage Information	 	 
+SerDe Library:      	org.apache.hadoop.hive.druid.QTestDruidSerDe	 
+InputFormat:        	null                	 
+OutputFormat:       	null                	 
+Compressed:         	No                  	 
+Num Buckets:        	-1                  	 
+Bucket Columns:     	[]                  	 
+Sort Columns:       	[]                  	 
+Storage Desc Params:	 	 
+	serialization.format	1                   
+PREHOOK: query: -- dimension
+EXPLAIN EXTENDED
+SELECT robot FROM druid_table_1
+PREHOOK: type: QUERY
+POSTHOOK: query: -- dimension
+EXPLAIN EXTENDED
+SELECT robot FROM druid_table_1
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        TableScan
+          alias: druid_table_1
+          properties:
+            druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":"false","intervals":["1900-01-01T00:00:00.000-08:00/3000-01-01T00:00:00.000-08:00"],"dimensions":["robot"],"metrics":[],"granularity":"ALL","pagingSpec":{"threshold":1},"context":{"druid.query.fetch":false}}
+            druid.query.type select
+          Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+          GatherStats: false
+          Select Operator
+            expressions: robot (type: string)
+            outputColumnNames: _col0
+            Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+            ListSink
+
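Note: the single-line druid.query.json above, pretty-printed for readability
(a whitespace-only reformatting; content identical). The SELECT of a single
dimension becomes a Druid "select" query projecting just that dimension:

    {
      "queryType": "select",
      "dataSource": "wikipedia",
      "descending": "false",
      "intervals": ["1900-01-01T00:00:00.000-08:00/3000-01-01T00:00:00.000-08:00"],
      "dimensions": ["robot"],
      "metrics": [],
      "granularity": "ALL",
      "pagingSpec": {"threshold": 1},
      "context": {"druid.query.fetch": false}
    }
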
+PREHOOK: query: -- metric
+EXPLAIN EXTENDED
+SELECT delta FROM druid_table_1
+PREHOOK: type: QUERY
+POSTHOOK: query: -- metric
+EXPLAIN EXTENDED
+SELECT delta FROM druid_table_1
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        TableScan
+          alias: druid_table_1
+          properties:
+            druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":"false","intervals":["1900-01-01T00:00:00.000-08:00/3000-01-01T00:00:00.000-08:00"],"dimensions":[],"metrics":["delta"],"granularity":"ALL","pagingSpec":{"threshold":1},"context":{"druid.query.fetch":false}}
+            druid.query.type select
+          Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+          GatherStats: false
+          Select Operator
+            expressions: delta (type: float)
+            outputColumnNames: _col0
+            Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+            ListSink
+
+PREHOOK: query: EXPLAIN EXTENDED
+SELECT robot
+FROM druid_table_1
+WHERE language = 'en'
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN EXTENDED
+SELECT robot
+FROM druid_table_1
+WHERE language = 'en'
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        TableScan
+          alias: druid_table_1
+          properties:
+            druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":"false","intervals":["1900-01-01T00:00:00.000-08:00/3000-01-01T00:00:00.000-08:00"],"filter":{"type":"selector","dimension":"language","value":"en"},"dimensions":["robot"],"metrics":[],"granularity":"ALL","pagingSpec":{"threshold":1},"context":{"druid.query.fetch":false}}
+            druid.query.type select
+          Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+          GatherStats: false
+          Select Operator
+            expressions: robot (type: string)
+            outputColumnNames: _col0
+            Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+            ListSink
+
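Note: the WHERE language = 'en' predicate is pushed into the Druid query as a
selector filter. The relevant fragment of the druid.query.json above,
pretty-printed (whitespace only):

    {
      "filter": {"type": "selector", "dimension": "language", "value": "en"},
      "dimensions": ["robot"]
    }
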
+PREHOOK: query: EXPLAIN EXTENDED
+SELECT DISTINCT robot
+FROM druid_table_1
+WHERE language = 'en'
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN EXTENDED
+SELECT DISTINCT robot
+FROM druid_table_1
+WHERE language = 'en'
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        TableScan
+          alias: druid_table_1
+          properties:
+            druid.query.json {"queryType":"groupBy","dataSource":"wikipedia","granularity":"ALL","dimensions":["robot"],"limitSpec":{"type":"default"},"filter":{"type":"selector","dimension":"language","value":"en"},"aggregations":[{"type":"longSum","name":"dummy_agg","fieldName":"dummy_agg"}],"intervals":["1900-01-01T00:00:00.000-08:00/3000-01-01T00:00:00.000-08:00"]}
+            druid.query.type groupBy
+          Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+          GatherStats: false
+          Select Operator
+            expressions: robot (type: string)
+            outputColumnNames: _col0
+            Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+            ListSink
+
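Note: SELECT DISTINCT maps to a Druid "groupBy" query rather than a "select".
Pretty-printed (whitespace only), the generated query follows; the dummy_agg
longSum is presumably a placeholder added because Druid groupBy queries carry
an aggregations list even when the SQL computes no aggregate:

    {
      "queryType": "groupBy",
      "dataSource": "wikipedia",
      "granularity": "ALL",
      "dimensions": ["robot"],
      "limitSpec": {"type": "default"},
      "filter": {"type": "selector", "dimension": "language", "value": "en"},
      "aggregations": [{"type": "longSum", "name": "dummy_agg", "fieldName": "dummy_agg"}],
      "intervals": ["1900-01-01T00:00:00.000-08:00/3000-01-01T00:00:00.000-08:00"]
    }
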
+PREHOOK: query: -- TODO: currently nothing is pushed - ISNOTNULL
+EXPLAIN EXTENDED
+SELECT a.robot, b.language
+FROM
+(
+  (SELECT robot, language
+  FROM druid_table_1) a
+  JOIN
+  (SELECT language
+  FROM druid_table_1) b
+  ON a.language = b.language
+)
+PREHOOK: type: QUERY
+POSTHOOK: query: -- TODO: currently nothing is pushed - ISNOTNULL
+EXPLAIN EXTENDED
+SELECT a.robot, b.language
+FROM
+(
+  (SELECT robot, language
+  FROM druid_table_1) a
+  JOIN
+  (SELECT language
+  FROM druid_table_1) b
+  ON a.language = b.language
+)
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: druid_table_1
+            filterExpr: language is not null (type: boolean)
+            properties:
+              druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":"false","intervals":["1900-01-01T00:00:00.000-08:00/3000-01-01T00:00:00.000-08:00"],"dimensions":["robot","namespace","anonymous","unpatrolled","page","language","newpage","user"],"metrics":["count","added","delta","variation","deleted"],"granularity":"ALL","pagingSpec":{"threshold":1},"context":{"druid.query.fetch":false}}
+              druid.query.type select
+            Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+            GatherStats: false
+            Filter Operator
+              isSamplingPred: false
+              predicate: language is not null (type: boolean)
+              Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+              Select Operator
+                expressions: robot (type: string), language (type: string)
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+                Reduce Output Operator
+                  key expressions: _col1 (type: string)
+                  null sort order: a
+                  sort order: +
+                  Map-reduce partition columns: _col1 (type: string)
+                  Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+                  tag: 0
+                  value expressions: _col0 (type: string)
+                  auto parallelism: false
+          TableScan
+            alias: druid_table_1
+            filterExpr: language is not null (type: boolean)
+            properties:
+              druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":"false","intervals":["1900-01-01T00:00:00.000-08:00/3000-01-01T00:00:00.000-08:00"],"dimensions":["robot","namespace","anonymous","unpatrolled","page","language","newpage","user"],"metrics":["count","added","delta","variation","deleted"],"granularity":"ALL","pagingSpec":{"threshold":1},"context":{"druid.query.fetch":false}}
+              druid.query.type select
+            Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+            GatherStats: false
+            Filter Operator
+              isSamplingPred: false
+              predicate: language is not null (type: boolean)
+              Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+              Select Operator
+                expressions: language (type: string)
+                outputColumnNames: _col0
+                Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+                Reduce Output Operator
+                  key expressions: _col0 (type: string)
+                  null sort order: a
+                  sort order: +
+                  Map-reduce partition columns: _col0 (type: string)
+                  Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+                  tag: 1
+                  auto parallelism: false
+      Path -> Alias:
+#### A masked pattern was here ####
+      Path -> Partition:
+#### A masked pattern was here ####
+          Partition
+            base file name: druid_table_1
+            input format: org.apache.hadoop.hive.druid.HiveDruidQueryBasedInputFormat
+            output format: org.apache.hadoop.hive.druid.HiveDruidOutputFormat
+            properties:
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true"}
+              EXTERNAL TRUE
+              bucket_count -1
+              columns __time,robot,namespace,anonymous,unpatrolled,page,language,newpage,user,count,added,delta,variation,deleted
+              columns.comments 'from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer'
+              columns.types timestamp:string:string:string:string:string:string:string:string:float:float:float:float:float
+              druid.datasource wikipedia
+              druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":"false","intervals":["1900-01-01T00:00:00.000-08:00/3000-01-01T00:00:00.000-08:00"],"dimensions":["robot","namespace","anonymous","unpatrolled","page","language","newpage","user"],"metrics":["count","added","delta","variation","deleted"],"granularity":"ALL","pagingSpec":{"threshold":1},"context":{"druid.query.fetch":false}}
+              druid.query.type select
+#### A masked pattern was here ####
+              name default.druid_table_1
+              numFiles 0
+              numRows 0
+              rawDataSize 0
+              serialization.ddl struct druid_table_1 { timestamp __time, string robot, string namespace, string anonymous, string unpatrolled, string page, string language, string newpage, string user, float count, float added, float delta, float variation, float deleted}
+              serialization.format 1
+              serialization.lib org.apache.hadoop.hive.druid.QTestDruidSerDe
+              storage_handler org.apache.hadoop.hive.druid.QTestDruidStorageHandler
+              totalSize 0
+#### A masked pattern was here ####
+            serde: org.apache.hadoop.hive.druid.QTestDruidSerDe
+          
+              input format: org.apache.hadoop.hive.druid.HiveDruidQueryBasedInputFormat
+              output format: org.apache.hadoop.hive.druid.HiveDruidOutputFormat
+              properties:
+                COLUMN_STATS_ACCURATE {"BASIC_STATS":"true"}
+                EXTERNAL TRUE
+                bucket_count -1
+                columns __time,robot,namespace,anonymous,unpatrolled,page,language,newpage,user,count,added,delta,variation,deleted
+                columns.comments 'from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer'
+                columns.types timestamp:string:string:string:string:string:string:string:string:float:float:float:float:float
+                druid.datasource wikipedia
+                druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":"false","intervals":["1900-01-01T00:00:00.000-08:00/3000-01-01T00:00:00.000-08:00"],"dimensions":["robot","namespace","anonymous","unpatrolled","page","language","newpage","user"],"metrics":["count","added","delta","variation","deleted"],"granularity":"ALL","pagingSpec":{"threshold":1},"context":{"druid.query.fetch":false}}
+                druid.query.type select
+#### A masked pattern was here ####
+                name default.druid_table_1
+                numFiles 0
+                numRows 0
+                rawDataSize 0
+                serialization.ddl struct druid_table_1 { timestamp __time, string robot, string namespace, string anonymous, string unpatrolled, string page, string language, string newpage, string user, float count, float added, float delta, float variation, float deleted}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.druid.QTestDruidSerDe
+                storage_handler org.apache.hadoop.hive.druid.QTestDruidStorageHandler
+                totalSize 0
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.druid.QTestDruidSerDe
+              name: default.druid_table_1
+            name: default.druid_table_1
+      Truncated Path -> Alias:
+        /druid_table_1 [$hdt$_0:druid_table_1, $hdt$_1:druid_table_1]
+      Needs Tagging: true
+      Reduce Operator Tree:
+        Join Operator
+          condition map:
+               Inner Join 0 to 1
+          keys:
+            0 _col1 (type: string)
+            1 _col0 (type: string)
+          outputColumnNames: _col0, _col2
+          Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+          Select Operator
+            expressions: _col0 (type: string), _col2 (type: string)
+            outputColumnNames: _col0, _col1
+            Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+            File Output Operator
+              compressed: false
+              GlobalTableId: 0
+#### A masked pattern was here ####
+              NumFilesPerFileSink: 1
+              Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+#### A masked pattern was here ####
+              table:
+                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                  properties:
+                    columns _col0,_col1
+                    columns.types string:string
+                    escape.delim \
+                    hive.serialization.extend.additional.nesting.levels true
+                    serialization.escape.crlf true
+                    serialization.format 1
+                    serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              TotalFiles: 1
+              GatherStats: false
+              MultiFileSpray: false
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
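Note: as the TODO above says, nothing beyond the bare scan is pushed for this
join: both TableScans issue the same full "select" over every dimension and
metric, and the IS NOT NULL filter and the join itself run in the MapReduce
stage. Pretty-printed (whitespace only), the per-scan query is:

    {
      "queryType": "select",
      "dataSource": "wikipedia",
      "descending": "false",
      "intervals": ["1900-01-01T00:00:00.000-08:00/3000-01-01T00:00:00.000-08:00"],
      "dimensions": ["robot", "namespace", "anonymous", "unpatrolled",
                     "page", "language", "newpage", "user"],
      "metrics": ["count", "added", "delta", "variation", "deleted"],
      "granularity": "ALL",
      "pagingSpec": {"threshold": 1},
      "context": {"druid.query.fetch": false}
    }
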
+Warning: Shuffle Join JOIN[5][tables = [druid_table_1, $hdt$_0]] in Stage 'Stage-1:MAPRED' is a cross product
+PREHOOK: query: EXPLAIN EXTENDED
+SELECT a.robot, b.language
+FROM
+(
+  (SELECT robot, language
+  FROM druid_table_1
+  WHERE language = 'en') a
+  JOIN
+  (SELECT language
+  FROM druid_table_1) b
+  ON a.language = b.language
+)
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN EXTENDED
+SELECT a.robot, b.language
+FROM
+(
+  (SELECT robot, language
+  FROM druid_table_1
+  WHERE language = 'en') a
+  JOIN
+  (SELECT language
+  FROM druid_table_1) b
+  ON a.language = b.language
+)
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: druid_table_1
+            properties:
+              druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":"false","intervals":["1900-01-01T00:00:00.000-08:00/3000-01-01T00:00:00.000-08:00"],"filter":{"type":"selector","dimension":"language","value":"en"},"dimensions":[],"metrics":[],"granularity":"ALL","pagingSpec":{"threshold":1},"context":{"druid.query.fetch":false}}
+              druid.query.type select
+            Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
+            GatherStats: false
+            Select Operator
+              Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
+              Reduce Output Operator
+                null sort order: 
+                sort order: 
+                Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
+                tag: 1
+                auto parallelism: false
+          TableScan
+            alias: druid_table_1
+            properties:
+              druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":"false","intervals":["1900-01-01T00:00:00.000-08:00/3000-01-01T00:00:00.000-08:00"],"filter":{"type":"selector","dimension":"language","value":"en"},"dimensions":["robot"],"metrics":[],"granularity":"ALL","pagingSpec":{"threshold":1},"context":{"druid.query.fetch":false}}
+              druid.query.type select
+            Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+            GatherStats: false
+            Reduce Output Operator
+              null sort order: 
+              sort order: 
+              Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+              tag: 0
+              value expressions: robot (type: string)
+              auto parallelism: false
+      Path -> Alias:
+#### A masked pattern was here ####
+      Path -> Partition:
+#### A masked pattern was here ####
+          Partition
+            base file name: druid_table_1
+            input format: org.apache.hadoop.hive.druid.HiveDruidQueryBasedInputFormat
+            output format: org.apache.hadoop.hive.druid.HiveDruidOutputFormat
+            properties:
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true"}
+              EXTERNAL TRUE
+              bucket_count -1
+              columns __time,robot,namespace,anonymous,unpatrolled,page,language,newpage,user,count,added,delta,variation,deleted
+              columns.comments 'from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer'
+              columns.types timestamp:string:string:string:string:string:string:string:string:float:float:float:float:float
+              druid.datasource wikipedia
+              druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":"false","intervals":["1900-01-01T00:00:00.000-08:00/3000-01-01T00:00:00.000-08:00"],"filter":{"type":"selector","dimension":"language","value":"en"},"dimensions":[],"metrics":[],"granularity":"ALL","pagingSpec":{"threshold":1},"context":{"druid.query.fetch":false}}
+              druid.query.type select
+#### A masked pattern was here ####
+              name default.druid_table_1
+              numFiles 0
+              numRows 0
+              rawDataSize 0
+              serialization.ddl struct druid_table_1 { timestamp __time, string robot, string namespace, string anonymous, string unpatrolled, string page, string language, string newpage, string user, float count, float added, float delta, float variation, float deleted}
+              serialization.format 1
+              serialization.lib org.apache.hadoop.hive.druid.QTestDruidSerDe
+              storage_handler org.apache.hadoop.hive.druid.QTestDruidStorageHandler
+              totalSize 0
+#### A masked pattern was here ####
+            serde: org.apache.hadoop.hive.druid.QTestDruidSerDe
+          
+              input format: org.apache.hadoop.hive.druid.HiveDruidQueryBasedInputFormat
+              output format: org.apache.hadoop.hive.druid.HiveDruidOutputFormat
+              properties:
+                COLUMN_STATS_ACCURATE {"BASIC_STATS":"true"}
+                EXTERNAL TRUE
+                bucket_count -1
+                columns __time,robot,namespace,anonymous,unpatrolled,page,language,newpage,user,count,added,delta,variation,deleted
+                columns.comments 'from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer'
+                columns.types timestamp:string:string:string:string:string:string:string:string:float:float:float:float:float
+                druid.datasource wikipedia
+                druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":"false","intervals":["1900-01-01T00:00:00.000-08:00/3000-01-01T00:00:00.000-08:00"],"filter":{"type":"selector","dimension":"language","value":"en"},"dimensions":["robot"],"metrics":[],"granularity":"ALL","pagingSpec":{"threshold":1},"context":{"druid.query.fetch":false}}
+                druid.query.type select
+#### A masked pattern was here ####
+                name default.druid_table_1
+                numFiles 0
+                numRows 0
+                rawDataSize 0
+                serialization.ddl struct druid_table_1 { timestamp __time, string robot, string namespace, string anonymous, string unpatrolled, string page, string language, string newpage, string user, float count, float added, float delta, float variation, float deleted}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.druid.QTestDruidSerDe
+                storage_handler org.apache.hadoop.hive.druid.QTestDruidStorageHandler
+                totalSize 0
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.druid.QTestDruidSerDe
+              name: default.druid_table_1
+            name: default.druid_table_1
+      Truncated Path -> Alias:
+        /druid_table_1 [$hdt$_0:druid_table_1, druid_table_1]
+      Needs Tagging: true
+      Reduce Operator Tree:
+        Join Operator
+          condition map:
+               Inner Join 0 to 1
+          keys:
+            0 
+            1 
+          outputColumnNames: _col1
+          Statistics: Num rows: 1 Data size: 1 Basic stats: COMPLETE Column stats: NONE
+          Select Operator
+            expressions: _col1 (type: string), 'en' (type: string)
+            outputColumnNames: _col0, _col1
+            Statistics: Num rows: 1 Data size: 1 Basic stats: COMPLETE Column stats: NONE
+            File Output Operator
+              compressed: false
+              GlobalTableId: 0
+#### A masked pattern was here ####
+              NumFilesPerFileSink: 1
+              Statistics: Num rows: 1 Data size: 1 Basic stats: COMPLETE Column stats: NONE
+#### A masked pattern was here ####
+              table:
+                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                  properties:
+                    columns _col0,_col1
+                    columns.types string:string
+                    escape.delim \
+                    hive.serialization.extend.additional.nesting.levels true
+                    serialization.escape.crlf true
+                    serialization.format 1
+                    serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              TotalFiles: 1
+              GatherStats: false
+              MultiFileSpray: false
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+

http://git-wip-us.apache.org/repos/asf/hive/blob/58d1befa/ql/src/test/results/clientpositive/druid_intervals.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/druid_intervals.q.out b/ql/src/test/results/clientpositive/druid_intervals.q.out
new file mode 100644
index 0000000..984bb79
--- /dev/null
+++ b/ql/src/test/results/clientpositive/druid_intervals.q.out
@@ -0,0 +1,398 @@
+PREHOOK: query: CREATE EXTERNAL TABLE druid_table_1
+STORED BY 'org.apache.hadoop.hive.druid.QTestDruidStorageHandler'
+TBLPROPERTIES ("druid.datasource" = "wikipedia")
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@druid_table_1
+POSTHOOK: query: CREATE EXTERNAL TABLE druid_table_1
+STORED BY 'org.apache.hadoop.hive.druid.QTestDruidStorageHandler'
+TBLPROPERTIES ("druid.datasource" = "wikipedia")
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@druid_table_1
+PREHOOK: query: DESCRIBE FORMATTED druid_table_1
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@druid_table_1
+POSTHOOK: query: DESCRIBE FORMATTED druid_table_1
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@druid_table_1
+# col_name            	data_type           	comment             
+	 	 
+__time              	timestamp           	from deserializer   
+robot               	string              	from deserializer   
+namespace           	string              	from deserializer   
+anonymous           	string              	from deserializer   
+unpatrolled         	string              	from deserializer   
+page                	string              	from deserializer   
+language            	string              	from deserializer   
+newpage             	string              	from deserializer   
+user                	string              	from deserializer   
+count               	float               	from deserializer   
+added               	float               	from deserializer   
+delta               	float               	from deserializer   
+variation           	float               	from deserializer   
+deleted             	float               	from deserializer   
+	 	 
+# Detailed Table Information	 	 
+Database:           	default             	 
+#### A masked pattern was here ####
+Retention:          	0                   	 
+#### A masked pattern was here ####
+Table Type:         	EXTERNAL_TABLE      	 
+Table Parameters:	 	 
+	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\"}
+	EXTERNAL            	TRUE                
+	druid.datasource    	wikipedia           
+	numFiles            	0                   
+	numRows             	0                   
+	rawDataSize         	0                   
+	storage_handler     	org.apache.hadoop.hive.druid.QTestDruidStorageHandler
+	totalSize           	0                   
+#### A masked pattern was here ####
+	 	 
+# Storage Information	 	 
+SerDe Library:      	org.apache.hadoop.hive.druid.QTestDruidSerDe	 
+InputFormat:        	null                	 
+OutputFormat:       	null                	 
+Compressed:         	No                  	 
+Num Buckets:        	-1                  	 
+Bucket Columns:     	[]                  	 
+Sort Columns:       	[]                  	 
+Storage Desc Params:	 	 
+	serialization.format	1                   
+PREHOOK: query: -- (-∞‥+∞)
+EXPLAIN
+SELECT `__time`
+FROM druid_table_1
+PREHOOK: type: QUERY
+POSTHOOK: query: -- (-∞‥+∞)
+EXPLAIN
+SELECT `__time`
+FROM druid_table_1
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        TableScan
+          alias: druid_table_1
+          properties:
+            druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":"false","intervals":["1900-01-01T00:00:00.000-08:00/3000-01-01T00:00:00.000-08:00"],"dimensions":[],"metrics":[],"granularity":"ALL","pagingSpec":{"threshold":1},"context":{"druid.query.fetch":false}}
+            druid.query.type select
+          Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+          Select Operator
+            expressions: __time (type: timestamp)
+            outputColumnNames: _col0
+            Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+            ListSink
+
+PREHOOK: query: -- (-∞‥2012-03-01 00:00:00)
+EXPLAIN
+SELECT `__time`
+FROM druid_table_1
+WHERE `__time` < '2012-03-01 00:00:00'
+PREHOOK: type: QUERY
+POSTHOOK: query: -- (-∞‥2012-03-01 00:00:00)
+EXPLAIN
+SELECT `__time`
+FROM druid_table_1
+WHERE `__time` < '2012-03-01 00:00:00'
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        TableScan
+          alias: druid_table_1
+          properties:
+            druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":"false","intervals":["1900-01-01T00:00:00.000-08:00/2012-03-01T00:00:00.000-08:00"],"dimensions":[],"metrics":[],"granularity":"ALL","pagingSpec":{"threshold":1},"context":{"druid.query.fetch":false}}
+            druid.query.type select
+          Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+          Select Operator
+            expressions: __time (type: timestamp)
+            outputColumnNames: _col0
+            Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+            ListSink
+
+PREHOOK: query: -- [2010-01-01 00:00:00‥2012-03-01 00:00:00]
+EXPLAIN
+SELECT `__time`
+FROM druid_table_1
+WHERE `__time` >= '2010-01-01 00:00:00' AND `__time` <= '2012-03-01 00:00:00'
+PREHOOK: type: QUERY
+POSTHOOK: query: -- [2010-01-01 00:00:00‥2012-03-01 00:00:00]
+EXPLAIN
+SELECT `__time`
+FROM druid_table_1
+WHERE `__time` >= '2010-01-01 00:00:00' AND `__time` <= '2012-03-01 00:00:00'
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        TableScan
+          alias: druid_table_1
+          properties:
+            druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":"false","intervals":["2010-01-01T00:00:00.000-08:00/2012-03-01T00:00:00.001-08:00"],"dimensions":[],"metrics":[],"granularity":"ALL","pagingSpec":{"threshold":1},"context":{"druid.query.fetch":false}}
+            druid.query.type select
+          Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+          Select Operator
+            expressions: __time (type: timestamp)
+            outputColumnNames: _col0
+            Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+            ListSink
+
+PREHOOK: query: -- [2010-01-01 00:00:00‥2011-01-01 00:00:00)
+EXPLAIN
+SELECT `__time`
+FROM druid_table_1
+WHERE `__time` >= '2010-01-01 00:00:00' AND `__time` <= '2012-03-01 00:00:00'
+    AND `__time` < '2011-01-01 00:00:00'
+PREHOOK: type: QUERY
+POSTHOOK: query: -- [2010-01-01 00:00:00‥2011-01-01 00:00:00)
+EXPLAIN
+SELECT `__time`
+FROM druid_table_1
+WHERE `__time` >= '2010-01-01 00:00:00' AND `__time` <= '2012-03-01 00:00:00'
+    AND `__time` < '2011-01-01 00:00:00'
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        TableScan
+          alias: druid_table_1
+          properties:
+            druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":"false","intervals":["2010-01-01T00:00:00.000-08:00/2011-01-01T00:00:00.000-08:00"],"dimensions":[],"metrics":[],"granularity":"ALL","pagingSpec":{"threshold":1},"context":{"druid.query.fetch":false}}
+            druid.query.type select
+          Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+          Select Operator
+            expressions: __time (type: timestamp)
+            outputColumnNames: _col0
+            Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+            ListSink
+
+PREHOOK: query: -- [2010-01-01 00:00:00‥2011-01-01 00:00:00]
+EXPLAIN
+SELECT `__time`
+FROM druid_table_1
+WHERE `__time` BETWEEN '2010-01-01 00:00:00' AND '2011-01-01 00:00:00'
+PREHOOK: type: QUERY
+POSTHOOK: query: -- [2010-01-01 00:00:00‥2011-01-01 00:00:00]
+EXPLAIN
+SELECT `__time`
+FROM druid_table_1
+WHERE `__time` BETWEEN '2010-01-01 00:00:00' AND '2011-01-01 00:00:00'
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        TableScan
+          alias: druid_table_1
+          properties:
+            druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":"false","intervals":["2010-01-01T00:00:00.000-08:00/2011-01-01T00:00:00.001-08:00"],"dimensions":[],"metrics":[],"granularity":"ALL","pagingSpec":{"threshold":1},"context":{"druid.query.fetch":false}}
+            druid.query.type select
+          Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+          Select Operator
+            expressions: __time (type: timestamp)
+            outputColumnNames: _col0
+            Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+            ListSink
+
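Note: Druid interval specs are half-open [start, end), so the closed upper
bound of the BETWEEN above is encoded by extending the end instant by one
millisecond, in contrast to the strict `<` case earlier, whose interval ends
at .000:

    {
      "intervals": ["2010-01-01T00:00:00.000-08:00/2011-01-01T00:00:00.001-08:00"]
    }
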
+PREHOOK: query: -- [2010-01-01 00:00:00‥2011-01-01 00:00:00],[2012-01-01 00:00:00‥2013-01-01 00:00:00]
+EXPLAIN
+SELECT `__time`
+FROM druid_table_1
+WHERE (`__time` BETWEEN '2010-01-01 00:00:00' AND '2011-01-01 00:00:00')
+    OR (`__time` BETWEEN '2012-01-01 00:00:00' AND '2013-01-01 00:00:00')
+PREHOOK: type: QUERY
+POSTHOOK: query: -- [2010-01-01 00:00:00‥2011-01-01 00:00:00],[2012-01-01 00:00:00‥2013-01-01 00:00:00]
+EXPLAIN
+SELECT `__time`
+FROM druid_table_1
+WHERE (`__time` BETWEEN '2010-01-01 00:00:00' AND '2011-01-01 00:00:00')
+    OR (`__time` BETWEEN '2012-01-01 00:00:00' AND '2013-01-01 00:00:00')
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        TableScan
+          alias: druid_table_1
+          properties:
+            druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":"false","intervals":["2010-01-01T00:00:00.000-08:00/2011-01-01T00:00:00.001-08:00","2012-01-01T00:00:00.000-08:00/2013-01-01T00:00:00.001-08:00"],"dimensions":[],"metrics":[],"granularity":"ALL","pagingSpec":{"threshold":1},"context":{"druid.query.fetch":false}}
+            druid.query.type select
+          Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+          Select Operator
+            expressions: __time (type: timestamp)
+            outputColumnNames: _col0
+            Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+            ListSink
+
+PREHOOK: query: -- OVERLAP [2010-01-01 00:00:00‥2012-01-01 00:00:00]
+EXPLAIN
+SELECT `__time`
+FROM druid_table_1
+WHERE (`__time` BETWEEN '2010-01-01 00:00:00' AND '2011-01-01 00:00:00')
+    OR (`__time` BETWEEN '2010-06-01 00:00:00' AND '2012-01-01 00:00:00')
+PREHOOK: type: QUERY
+POSTHOOK: query: -- OVERLAP [2010-01-01 00:00:00‥2012-01-01 00:00:00]
+EXPLAIN
+SELECT `__time`
+FROM druid_table_1
+WHERE (`__time` BETWEEN '2010-01-01 00:00:00' AND '2011-01-01 00:00:00')
+    OR (`__time` BETWEEN '2010-06-01 00:00:00' AND '2012-01-01 00:00:00')
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        TableScan
+          alias: druid_table_1
+          properties:
+            druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":"false","intervals":["2010-01-01T00:00:00.000-08:00/2012-01-01T00:00:00.001-08:00"],"dimensions":[],"metrics":[],"granularity":"ALL","pagingSpec":{"threshold":1},"context":{"druid.query.fetch":false}}
+            druid.query.type select
+          Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+          Select Operator
+            expressions: __time (type: timestamp)
+            outputColumnNames: _col0
+            Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+            ListSink
+
+PREHOOK: query: -- IN: MULTIPLE INTERVALS [2010-01-01 00:00:00‥2010-01-01 00:00:00],[2011-01-01 00:00:00‥2011-01-01 00:00:00]
+EXPLAIN
+SELECT `__time`
+FROM druid_table_1
+WHERE `__time` IN ('2010-01-01 00:00:00','2011-01-01 00:00:00')
+PREHOOK: type: QUERY
+POSTHOOK: query: -- IN: MULTIPLE INTERVALS [2010-01-01 00:00:00‥2010-01-01 00:00:00],[2011-01-01 00:00:00‥2011-01-01 00:00:00]
+EXPLAIN
+SELECT `__time`
+FROM druid_table_1
+WHERE `__time` IN ('2010-01-01 00:00:00','2011-01-01 00:00:00')
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        TableScan
+          alias: druid_table_1
+          properties:
+            druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":"false","intervals":["2010-01-01T00:00:00.000-08:00/2010-01-01T00:00:00.001-08:00","2011-01-01T00:00:00.000-08:00/2011-01-01T00:00:00.001-08:00"],"dimensions":[],"metrics":[],"granularity":"ALL","pagingSpec":{"threshold":1},"context":{"druid.query.fetch":false}}
+            druid.query.type select
+          Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+          Select Operator
+            expressions: __time (type: timestamp)
+            outputColumnNames: _col0
+            Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+            ListSink
+
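Note: each value in the IN list becomes its own point interval, i.e. the
millisecond-wide half-open range [v.000, v.001):

    {
      "intervals": ["2010-01-01T00:00:00.000-08:00/2010-01-01T00:00:00.001-08:00",
                    "2011-01-01T00:00:00.000-08:00/2011-01-01T00:00:00.001-08:00"]
    }
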
+PREHOOK: query: EXPLAIN
+SELECT `__time`, robot
+FROM druid_table_1
+WHERE robot = 'user1' AND `__time` IN ('2010-01-01 00:00:00','2011-01-01 00:00:00')
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN
+SELECT `__time`, robot
+FROM druid_table_1
+WHERE robot = 'user1' AND `__time` IN ('2010-01-01 00:00:00','2011-01-01 00:00:00')
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        TableScan
+          alias: druid_table_1
+          properties:
+            druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":"false","intervals":["2010-01-01T00:00:00.000-08:00/2010-01-01T00:00:00.001-08:00","2011-01-01T00:00:00.000-08:00/2011-01-01T00:00:00.001-08:00"],"filter":{"type":"selector","dimension":"robot","value":"user1"},"dimensions":[],"metrics":[],"granularity":"ALL","pagingSpec":{"threshold":1},"context":{"druid.query.fetch":false}}
+            druid.query.type select
+          Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+          Select Operator
+            expressions: __time (type: timestamp), 'user1' (type: string)
+            outputColumnNames: _col0, _col1
+            Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+            ListSink
+
+PREHOOK: query: EXPLAIN
+SELECT `__time`, robot
+FROM druid_table_1
+WHERE robot = 'user1' OR `__time` IN ('2010-01-01 00:00:00','2011-01-01 00:00:00')
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN
+SELECT `__time`, robot
+FROM druid_table_1
+WHERE robot = 'user1' OR `__time` IN ('2010-01-01 00:00:00','2011-01-01 00:00:00')
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: druid_table_1
+            filterExpr: ((__time) IN ('2010-01-01 00:00:00', '2011-01-01 00:00:00') or (robot = 'user1')) (type: boolean)
+            properties:
+              druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":"false","intervals":["1900-01-01T00:00:00.000-08:00/3000-01-01T00:00:00.000-08:00"],"dimensions":["robot","namespace","anonymous","unpatrolled","page","language","newpage","user"],"metrics":["count","added","delta","variation","deleted"],"granularity":"ALL","pagingSpec":{"threshold":1},"context":{"druid.query.fetch":false}}
+              druid.query.type select
+            Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+            Filter Operator
+              predicate: ((__time) IN ('2010-01-01 00:00:00', '2011-01-01 00:00:00') or (robot = 'user1')) (type: boolean)
+              Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+              Select Operator
+                expressions: __time (type: timestamp), robot (type: string)
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+

http://git-wip-us.apache.org/repos/asf/hive/blob/58d1befa/ql/src/test/results/clientpositive/druid_timeseries.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/druid_timeseries.q.out b/ql/src/test/results/clientpositive/druid_timeseries.q.out
new file mode 100644
index 0000000..8d974a4
--- /dev/null
+++ b/ql/src/test/results/clientpositive/druid_timeseries.q.out
@@ -0,0 +1,566 @@
+PREHOOK: query: CREATE EXTERNAL TABLE druid_table_1
+STORED BY 'org.apache.hadoop.hive.druid.QTestDruidStorageHandler'
+TBLPROPERTIES ("druid.datasource" = "wikipedia")
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@druid_table_1
+POSTHOOK: query: CREATE EXTERNAL TABLE druid_table_1
+STORED BY 'org.apache.hadoop.hive.druid.QTestDruidStorageHandler'
+TBLPROPERTIES ("druid.datasource" = "wikipedia")
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@druid_table_1
+PREHOOK: query: DESCRIBE FORMATTED druid_table_1
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@druid_table_1
+POSTHOOK: query: DESCRIBE FORMATTED druid_table_1
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@druid_table_1
+# col_name            	data_type           	comment             
+	 	 
+__time              	timestamp           	from deserializer   
+robot               	string              	from deserializer   
+namespace           	string              	from deserializer   
+anonymous           	string              	from deserializer   
+unpatrolled         	string              	from deserializer   
+page                	string              	from deserializer   
+language            	string              	from deserializer   
+newpage             	string              	from deserializer   
+user                	string              	from deserializer   
+count               	float               	from deserializer   
+added               	float               	from deserializer   
+delta               	float               	from deserializer   
+variation           	float               	from deserializer   
+deleted             	float               	from deserializer   
+	 	 
+# Detailed Table Information	 	 
+Database:           	default             	 
+#### A masked pattern was here ####
+Retention:          	0                   	 
+#### A masked pattern was here ####
+Table Type:         	EXTERNAL_TABLE      	 
+Table Parameters:	 	 
+	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\"}
+	EXTERNAL            	TRUE                
+	druid.datasource    	wikipedia           
+	numFiles            	0                   
+	numRows             	0                   
+	rawDataSize         	0                   
+	storage_handler     	org.apache.hadoop.hive.druid.QTestDruidStorageHandler
+	totalSize           	0                   
+#### A masked pattern was here ####
+	 	 
+# Storage Information	 	 
+SerDe Library:      	org.apache.hadoop.hive.druid.QTestDruidSerDe	 
+InputFormat:        	null                	 
+OutputFormat:       	null                	 
+Compressed:         	No                  	 
+Num Buckets:        	-1                  	 
+Bucket Columns:     	[]                  	 
+Sort Columns:       	[]                  	 
+Storage Desc Params:	 	 
+	serialization.format	1                   
+PREHOOK: query: -- GRANULARITY: ALL
+EXPLAIN
+SELECT max(added), sum(variation)
+FROM druid_table_1
+PREHOOK: type: QUERY
+POSTHOOK: query: -- GRANULARITY: ALL
+EXPLAIN
+SELECT max(added), sum(variation)
+FROM druid_table_1
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        TableScan
+          alias: druid_table_1
+          properties:
+            druid.query.json {"queryType":"timeseries","dataSource":"wikipedia","descending":"false","granularity":"ALL","aggregations":[{"type":"longMax","name":"$f0","fieldName":"added"},{"type":"doubleSum","name":"$f1","fieldName":"variation"}],"intervals":["1900-01-01T00:00:00.000-08:00/3000-01-01T00:00:00.000-08:00"]}
+            druid.query.type timeseries
+          Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+          Select Operator
+            expressions: $f0 (type: bigint), $f1 (type: float)
+            outputColumnNames: _col0, _col1
+            Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+            ListSink
+
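Note: the global aggregation maps to a Druid "timeseries" query: max(added)
becomes a longMax aggregator and sum(variation) a doubleSum, with granularity
ALL since there is no GROUP BY. Pretty-printed (whitespace only):

    {
      "queryType": "timeseries",
      "dataSource": "wikipedia",
      "descending": "false",
      "granularity": "ALL",
      "aggregations": [
        {"type": "longMax", "name": "$f0", "fieldName": "added"},
        {"type": "doubleSum", "name": "$f1", "fieldName": "variation"}
      ],
      "intervals": ["1900-01-01T00:00:00.000-08:00/3000-01-01T00:00:00.000-08:00"]
    }
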
+PREHOOK: query: -- GRANULARITY: NONE
+EXPLAIN
+SELECT `__time`, max(added), sum(variation)
+FROM druid_table_1
+GROUP BY `__time`
+PREHOOK: type: QUERY
+POSTHOOK: query: -- GRANULARITY: NONE
+EXPLAIN
+SELECT `__time`, max(added), sum(variation)
+FROM druid_table_1
+GROUP BY `__time`
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        TableScan
+          alias: druid_table_1
+          properties:
+            druid.query.json {"queryType":"timeseries","dataSource":"wikipedia","descending":"false","granularity":"NONE","aggregations":[{"type":"longMax","name":"$f1","fieldName":"added"},{"type":"doubleSum","name":"$f2","fieldName":"variation"}],"intervals":["1900-01-01T00:00:00.000-08:00/3000-01-01T00:00:00.000-08:00"]}
+            druid.query.type timeseries
+          Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+          Select Operator
+            expressions: __time (type: timestamp), $f1 (type: bigint), $f2 (type: float)
+            outputColumnNames: _col0, _col1, _col2
+            Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+            ListSink
+
+PREHOOK: query: -- GRANULARITY: YEAR
+EXPLAIN
+SELECT floor_year(`__time`), max(added), sum(variation)
+FROM druid_table_1
+GROUP BY floor_year(`__time`)
+PREHOOK: type: QUERY
+POSTHOOK: query: -- GRANULARITY: YEAR
+EXPLAIN
+SELECT floor_year(`__time`), max(added), sum(variation)
+FROM druid_table_1
+GROUP BY floor_year(`__time`)
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        TableScan
+          alias: druid_table_1
+          properties:
+            druid.query.json {"queryType":"timeseries","dataSource":"wikipedia","descending":"false","granularity":"YEAR","aggregations":[{"type":"longMax","name":"$f1","fieldName":"added"},{"type":"doubleSum","name":"$f2","fieldName":"variation"}],"intervals":["1900-01-01T00:00:00.000-08:00/3000-01-01T00:00:00.000-08:00"]}
+            druid.query.type timeseries
+          Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+          Select Operator
+            expressions: __time (type: timestamp), $f1 (type: bigint), $f2 (type: float)
+            outputColumnNames: _col0, _col1, _col2
+            Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+            ListSink
+
+PREHOOK: query: -- GRANULARITY: QUARTER
+EXPLAIN
+SELECT floor_quarter(`__time`), max(added), sum(variation)
+FROM druid_table_1
+GROUP BY floor_quarter(`__time`)
+PREHOOK: type: QUERY
+POSTHOOK: query: -- GRANULARITY: QUARTER
+EXPLAIN
+SELECT floor_quarter(`__time`), max(added), sum(variation)
+FROM druid_table_1
+GROUP BY floor_quarter(`__time`)
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        TableScan
+          alias: druid_table_1
+          properties:
+            druid.query.json {"queryType":"timeseries","dataSource":"wikipedia","descending":"false","granularity":"QUARTER","aggregations":[{"type":"longMax","name":"$f1","fieldName":"added"},{"type":"doubleSum","name":"$f2","fieldName":"variation"}],"intervals":["1900-01-01T00:00:00.000-08:00/3000-01-01T00:00:00.000-08:00"]}
+            druid.query.type timeseries
+          Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+          Select Operator
+            expressions: __time (type: timestamp), $f1 (type: bigint), $f2 (type: float)
+            outputColumnNames: _col0, _col1, _col2
+            Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+            ListSink
+
+PREHOOK: query: -- GRANULARITY: MONTH
+EXPLAIN
+SELECT floor_month(`__time`), max(added), sum(variation)
+FROM druid_table_1
+GROUP BY floor_month(`__time`)
+PREHOOK: type: QUERY
+POSTHOOK: query: -- GRANULARITY: MONTH
+EXPLAIN
+SELECT floor_month(`__time`), max(added), sum(variation)
+FROM druid_table_1
+GROUP BY floor_month(`__time`)
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        TableScan
+          alias: druid_table_1
+          properties:
+            druid.query.json {"queryType":"timeseries","dataSource":"wikipedia","descending":"false","granularity":"MONTH","aggregations":[{"type":"longMax","name":"$f1","fieldName":"added"},{"type":"doubleSum","name":"$f2","fieldName":"variation"}],"intervals":["1900-01-01T00:00:00.000-08:00/3000-01-01T00:00:00.000-08:00"]}
+            druid.query.type timeseries
+          Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+          Select Operator
+            expressions: __time (type: timestamp), $f1 (type: bigint), $f2 (type: float)
+            outputColumnNames: _col0, _col1, _col2
+            Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+            ListSink
+
+PREHOOK: query: -- GRANULARITY: WEEK
+EXPLAIN
+SELECT floor_week(`__time`), max(added), sum(variation)
+FROM druid_table_1
+GROUP BY floor_week(`__time`)
+PREHOOK: type: QUERY
+POSTHOOK: query: -- GRANULARITY: WEEK
+EXPLAIN
+SELECT floor_week(`__time`), max(added), sum(variation)
+FROM druid_table_1
+GROUP BY floor_week(`__time`)
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        TableScan
+          alias: druid_table_1
+          properties:
+            druid.query.json {"queryType":"timeseries","dataSource":"wikipedia","descending":"false","granularity":"WEEK","aggregations":[{"type":"longMax","name":"$f1","fieldName":"added"},{"type":"doubleSum","name":"$f2","fieldName":"variation"}],"intervals":["1900-01-01T00:00:00.000-08:00/3000-01-01T00:00:00.000-08:00"]}
+            druid.query.type timeseries
+          Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+          Select Operator
+            expressions: __time (type: timestamp), $f1 (type: bigint), $f2 (type: float)
+            outputColumnNames: _col0, _col1, _col2
+            Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+            ListSink
+
+PREHOOK: query: -- GRANULARITY: DAY
+EXPLAIN
+SELECT floor_day(`__time`), max(added), sum(variation)
+FROM druid_table_1
+GROUP BY floor_day(`__time`)
+PREHOOK: type: QUERY
+POSTHOOK: query: -- GRANULARITY: DAY
+EXPLAIN
+SELECT floor_day(`__time`), max(added), sum(variation)
+FROM druid_table_1
+GROUP BY floor_day(`__time`)
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        TableScan
+          alias: druid_table_1
+          properties:
+            druid.query.json {"queryType":"timeseries","dataSource":"wikipedia","descending":"false","granularity":"DAY","aggregations":[{"type":"longMax","name":"$f1","fieldName":"added"},{"type":"doubleSum","name":"$f2","fieldName":"variation"}],"intervals":["1900-01-01T00:00:00.000-08:00/3000-01-01T00:00:00.000-08:00"]}
+            druid.query.type timeseries
+          Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+          Select Operator
+            expressions: __time (type: timestamp), $f1 (type: bigint), $f2 (type: float)
+            outputColumnNames: _col0, _col1, _col2
+            Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+            ListSink
+
+PREHOOK: query: -- GRANULARITY: HOUR
+EXPLAIN
+SELECT floor_hour(`__time`), max(added), sum(variation)
+FROM druid_table_1
+GROUP BY floor_hour(`__time`)
+PREHOOK: type: QUERY
+POSTHOOK: query: -- GRANULARITY: HOUR
+EXPLAIN
+SELECT floor_hour(`__time`), max(added), sum(variation)
+FROM druid_table_1
+GROUP BY floor_hour(`__time`)
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        TableScan
+          alias: druid_table_1
+          properties:
+            druid.query.json {"queryType":"timeseries","dataSource":"wikipedia","descending":"false","granularity":"HOUR","aggregations":[{"type":"longMax","name":"$f1","fieldName":"added"},{"type":"doubleSum","name":"$f2","fieldName":"variation"}],"intervals":["1900-01-01T00:00:00.000-08:00/3000-01-01T00:00:00.000-08:00"]}
+            druid.query.type timeseries
+          Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+          Select Operator
+            expressions: __time (type: timestamp), $f1 (type: bigint), $f2 (type: float)
+            outputColumnNames: _col0, _col1, _col2
+            Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+            ListSink
+
+PREHOOK: query: -- GRANULARITY: MINUTE
+EXPLAIN
+SELECT floor_minute(`__time`), max(added), sum(variation)
+FROM druid_table_1
+GROUP BY floor_minute(`__time`)
+PREHOOK: type: QUERY
+POSTHOOK: query: -- GRANULARITY: MINUTE
+EXPLAIN
+SELECT floor_minute(`__time`), max(added), sum(variation)
+FROM druid_table_1
+GROUP BY floor_minute(`__time`)
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        TableScan
+          alias: druid_table_1
+          properties:
+            druid.query.json {"queryType":"timeseries","dataSource":"wikipedia","descending":"false","granularity":"MINUTE","aggregations":[{"type":"longMax","name":"$f1","fieldName":"added"},{"type":"doubleSum","name":"$f2","fieldName":"variation"}],"intervals":["1900-01-01T00:00:00.000-08:00/3000-01-01T00:00:00.000-08:00"]}
+            druid.query.type timeseries
+          Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+          Select Operator
+            expressions: __time (type: timestamp), $f1 (type: bigint), $f2 (type: float)
+            outputColumnNames: _col0, _col1, _col2
+            Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+            ListSink
+
+PREHOOK: query: -- GRANULARITY: SECOND
+EXPLAIN
+SELECT floor_second(`__time`), max(added), sum(variation)
+FROM druid_table_1
+GROUP BY floor_second(`__time`)
+PREHOOK: type: QUERY
+POSTHOOK: query: -- GRANULARITY: SECOND
+EXPLAIN
+SELECT floor_second(`__time`), max(added), sum(variation)
+FROM druid_table_1
+GROUP BY floor_second(`__time`)
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        TableScan
+          alias: druid_table_1
+          properties:
+            druid.query.json {"queryType":"timeseries","dataSource":"wikipedia","descending":"false","granularity":"SECOND","aggregations":[{"type":"longMax","name":"$f1","fieldName":"added"},{"type":"doubleSum","name":"$f2","fieldName":"variation"}],"intervals":["1900-01-01T00:00:00.000-08:00/3000-01-01T00:00:00.000-08:00"]}
+            druid.query.type timeseries
+          Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+          Select Operator
+            expressions: __time (type: timestamp), $f1 (type: bigint), $f2 (type: float)
+            outputColumnNames: _col0, _col1, _col2
+            Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+            ListSink
+
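[The plans above differ only in the granularity string: Hive's floor_month, floor_week, floor_day, floor_hour, floor_minute, and floor_second each map one-to-one onto a Druid timeseries granularity, so the whole GROUP BY is answered by a single Druid query with no MapReduce stage. As a minimal sketch, not part of this patch (the broker URL and the Python requests client are assumptions), the generated JSON can be replayed directly against a Druid broker:

    import requests  # assumed: third-party HTTP client

    BROKER = "http://localhost:8082/druid/v2/"  # assumption: default broker endpoint

    def timeseries(granularity):
        # Mirrors the druid.query.json emitted by the plans above.
        return {
            "queryType": "timeseries",
            "dataSource": "wikipedia",
            "descending": "false",
            "granularity": granularity,  # "MONTH", "WEEK", ..., "SECOND"
            "aggregations": [
                {"type": "longMax", "name": "$f1", "fieldName": "added"},
                {"type": "doubleSum", "name": "$f2", "fieldName": "variation"},
            ],
            "intervals": ["1900-01-01T00:00:00.000-08:00/3000-01-01T00:00:00.000-08:00"],
        }

    resp = requests.post(BROKER, json=timeseries("MONTH"))
    resp.raise_for_status()
    for bucket in resp.json():  # one entry per time bucket
        print(bucket["timestamp"], bucket["result"])
]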
+PREHOOK: query: -- WITH FILTER ON DIMENSION
+EXPLAIN
+SELECT floor_hour(`__time`), max(added), sum(variation)
+FROM druid_table_1
+WHERE robot='1'
+GROUP BY floor_hour(`__time`)
+PREHOOK: type: QUERY
+POSTHOOK: query: -- WITH FILTER ON DIMENSION
+EXPLAIN
+SELECT floor_hour(`__time`), max(added), sum(variation)
+FROM druid_table_1
+WHERE robot='1'
+GROUP BY floor_hour(`__time`)
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        TableScan
+          alias: druid_table_1
+          properties:
+            druid.query.json {"queryType":"timeseries","dataSource":"wikipedia","descending":"false","granularity":"HOUR","filter":{"type":"selector","dimension":"robot","value":"1"},"aggregations":[{"type":"longMax","name":"$f1","fieldName":"added"},{"type":"doubleSum","name":"$f2","fieldName":"variation"}],"intervals":["1900-01-01T00:00:00.000-08:00/3000-01-01T00:00:00.000-08:00"]}
+            druid.query.type timeseries
+          Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+          Select Operator
+            expressions: __time (type: timestamp), $f1 (type: bigint), $f2 (type: float)
+            outputColumnNames: _col0, _col1, _col2
+            Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+            ListSink
+
+PREHOOK: query: -- WITH FILTER ON TIME
+EXPLAIN
+SELECT floor_hour(`__time`), max(added), sum(variation)
+FROM druid_table_1
+WHERE floor_hour(`__time`)
+    BETWEEN CAST('2010-01-01 00:00:00' AS TIMESTAMP)
+        AND CAST('2014-01-01 00:00:00' AS TIMESTAMP)
+GROUP BY floor_hour(`__time`)
+PREHOOK: type: QUERY
+POSTHOOK: query: -- WITH FILTER ON TIME
+EXPLAIN
+SELECT floor_hour(`__time`), max(added), sum(variation)
+FROM druid_table_1
+WHERE floor_hour(`__time`)
+    BETWEEN CAST('2010-01-01 00:00:00' AS TIMESTAMP)
+        AND CAST('2014-01-01 00:00:00' AS TIMESTAMP)
+GROUP BY floor_hour(`__time`)
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: druid_table_1
+            filterExpr: floor_hour(__time) BETWEEN 2010-01-01 00:00:00.0 AND 2014-01-01 00:00:00.0 (type: boolean)
+            properties:
+              druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":"false","intervals":["1900-01-01T00:00:00.000-08:00/3000-01-01T00:00:00.000-08:00"],"dimensions":["robot","namespace","anonymous","unpatrolled","page","language","newpage","user"],"metrics":["count","added","delta","variation","deleted"],"granularity":"ALL","pagingSpec":{"threshold":1},"context":{"druid.query.fetch":false}}
+              druid.query.type select
+            Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+            Filter Operator
+              predicate: floor_hour(__time) BETWEEN 2010-01-01 00:00:00.0 AND 2014-01-01 00:00:00.0 (type: boolean)
+              Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+              Select Operator
+                expressions: floor_hour(__time) (type: timestamp), added (type: float), variation (type: float)
+                outputColumnNames: _col0, _col1, _col2
+                Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+                Group By Operator
+                  aggregations: max(_col1), sum(_col2)
+                  keys: _col0 (type: timestamp)
+                  mode: hash
+                  outputColumnNames: _col0, _col1, _col2
+                  Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+                  Reduce Output Operator
+                    key expressions: _col0 (type: timestamp)
+                    sort order: +
+                    Map-reduce partition columns: _col0 (type: timestamp)
+                    Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+                    value expressions: _col1 (type: float), _col2 (type: double)
+      Reduce Operator Tree:
+        Group By Operator
+          aggregations: max(VALUE._col0), sum(VALUE._col1)
+          keys: KEY._col0 (type: timestamp)
+          mode: mergepartial
+          outputColumnNames: _col0, _col1, _col2
+          Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+          File Output Operator
+            compressed: false
+            Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+            table:
+                input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
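[Unlike the selector filter in the previous plan, the floor_hour(`__time`) BETWEEN predicate is not folded into the Druid query here: the scan falls back to a Druid "select" query over the full 1900-3000 interval, and the time filter plus the aggregation run in Hive's MapReduce stage. A hand-written equivalent that did push the bound into Druid would narrow the intervals instead. A hedged sketch only, not what this patch generates (broker URL assumed; Druid intervals are half-open, so the upper bound is not exactly SQL's inclusive BETWEEN):

    import requests  # assumed: third-party HTTP client

    query = {
        "queryType": "timeseries",
        "dataSource": "wikipedia",
        "descending": "false",
        "granularity": "HOUR",
        "aggregations": [
            {"type": "longMax", "name": "$f1", "fieldName": "added"},
            {"type": "doubleSum", "name": "$f2", "fieldName": "variation"},
        ],
        # BETWEEN '2010-01-01' AND '2014-01-01' as a (half-open) Druid interval;
        # the timezone offset is copied from the fixtures above.
        "intervals": ["2010-01-01T00:00:00.000-08:00/2014-01-01T00:00:00.000-08:00"],
    }
    rows = requests.post("http://localhost:8082/druid/v2/", json=query).json()
]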
+PREHOOK: query: -- WITH FILTER ON TIME
+EXPLAIN
+SELECT subq.h, subq.m, subq.s
+FROM
+(
+  SELECT floor_hour(`__time`) as h, max(added) as m, sum(variation) as s
+  FROM druid_table_1
+  GROUP BY floor_hour(`__time`)
+) subq
+WHERE subq.h BETWEEN CAST('2010-01-01 00:00:00' AS TIMESTAMP)
+        AND CAST('2014-01-01 00:00:00' AS TIMESTAMP)
+PREHOOK: type: QUERY
+POSTHOOK: query: -- WITH FILTER ON TIME
+EXPLAIN
+SELECT subq.h, subq.m, subq.s
+FROM
+(
+  SELECT floor_hour(`__time`) as h, max(added) as m, sum(variation) as s
+  FROM druid_table_1
+  GROUP BY floor_hour(`__time`)
+) subq
+WHERE subq.h BETWEEN CAST('2010-01-01 00:00:00' AS TIMESTAMP)
+        AND CAST('2014-01-01 00:00:00' AS TIMESTAMP)
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: druid_table_1
+            filterExpr: floor_hour(__time) BETWEEN 2010-01-01 00:00:00.0 AND 2014-01-01 00:00:00.0 (type: boolean)
+            properties:
+              druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":"false","intervals":["1900-01-01T00:00:00.000-08:00/3000-01-01T00:00:00.000-08:00"],"dimensions":["robot","namespace","anonymous","unpatrolled","page","language","newpage","user"],"metrics":["count","added","delta","variation","deleted"],"granularity":"ALL","pagingSpec":{"threshold":1},"context":{"druid.query.fetch":false}}
+              druid.query.type select
+            Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+            Filter Operator
+              predicate: floor_hour(__time) BETWEEN 2010-01-01 00:00:00.0 AND 2014-01-01 00:00:00.0 (type: boolean)
+              Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+              Select Operator
+                expressions: floor_hour(__time) (type: timestamp), added (type: float), variation (type: float)
+                outputColumnNames: _col0, _col1, _col2
+                Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+                Group By Operator
+                  aggregations: max(_col1), sum(_col2)
+                  keys: _col0 (type: timestamp)
+                  mode: hash
+                  outputColumnNames: _col0, _col1, _col2
+                  Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+                  Reduce Output Operator
+                    key expressions: _col0 (type: timestamp)
+                    sort order: +
+                    Map-reduce partition columns: _col0 (type: timestamp)
+                    Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+                    value expressions: _col1 (type: float), _col2 (type: double)
+      Reduce Operator Tree:
+        Group By Operator
+          aggregations: max(VALUE._col0), sum(VALUE._col1)
+          keys: KEY._col0 (type: timestamp)
+          mode: mergepartial
+          outputColumnNames: _col0, _col1, _col2
+          Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+          File Output Operator
+            compressed: false
+            Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+            table:
+                input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+

http://git-wip-us.apache.org/repos/asf/hive/blob/58d1befa/ql/src/test/results/clientpositive/druid_topn.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/druid_topn.q.out b/ql/src/test/results/clientpositive/druid_topn.q.out
new file mode 100644
index 0000000..17bdaed
--- /dev/null
+++ b/ql/src/test/results/clientpositive/druid_topn.q.out
@@ -0,0 +1,419 @@
+PREHOOK: query: CREATE EXTERNAL TABLE druid_table_1
+STORED BY 'org.apache.hadoop.hive.druid.QTestDruidStorageHandler'
+TBLPROPERTIES ("druid.datasource" = "wikipedia")
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@druid_table_1
+POSTHOOK: query: CREATE EXTERNAL TABLE druid_table_1
+STORED BY 'org.apache.hadoop.hive.druid.QTestDruidStorageHandler'
+TBLPROPERTIES ("druid.datasource" = "wikipedia")
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@druid_table_1
+PREHOOK: query: DESCRIBE FORMATTED druid_table_1
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@druid_table_1
+POSTHOOK: query: DESCRIBE FORMATTED druid_table_1
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@druid_table_1
+# col_name            	data_type           	comment             
+	 	 
+__time              	timestamp           	from deserializer   
+robot               	string              	from deserializer   
+namespace           	string              	from deserializer   
+anonymous           	string              	from deserializer   
+unpatrolled         	string              	from deserializer   
+page                	string              	from deserializer   
+language            	string              	from deserializer   
+newpage             	string              	from deserializer   
+user                	string              	from deserializer   
+count               	float               	from deserializer   
+added               	float               	from deserializer   
+delta               	float               	from deserializer   
+variation           	float               	from deserializer   
+deleted             	float               	from deserializer   
+	 	 
+# Detailed Table Information	 	 
+Database:           	default             	 
+#### A masked pattern was here ####
+Retention:          	0                   	 
+#### A masked pattern was here ####
+Table Type:         	EXTERNAL_TABLE      	 
+Table Parameters:	 	 
+	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\"}
+	EXTERNAL            	TRUE                
+	druid.datasource    	wikipedia           
+	numFiles            	0                   
+	numRows             	0                   
+	rawDataSize         	0                   
+	storage_handler     	org.apache.hadoop.hive.druid.QTestDruidStorageHandler
+	totalSize           	0                   
+#### A masked pattern was here ####
+	 	 
+# Storage Information	 	 
+SerDe Library:      	org.apache.hadoop.hive.druid.QTestDruidSerDe	 
+InputFormat:        	null                	 
+OutputFormat:       	null                	 
+Compressed:         	No                  	 
+Num Buckets:        	-1                  	 
+Bucket Columns:     	[]                  	 
+Sort Columns:       	[]                  	 
+Storage Desc Params:	 	 
+	serialization.format	1                   
+PREHOOK: query: -- GRANULARITY: ALL
+EXPLAIN
+SELECT robot, max(added) as m, sum(variation)
+FROM druid_table_1
+GROUP BY robot
+ORDER BY m DESC
+LIMIT 100
+PREHOOK: type: QUERY
+POSTHOOK: query: -- GRANULARITY: ALL
+EXPLAIN
+SELECT robot, max(added) as m, sum(variation)
+FROM druid_table_1
+GROUP BY robot
+ORDER BY m DESC
+LIMIT 100
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        TableScan
+          alias: druid_table_1
+          properties:
+            druid.query.json {"queryType":"topN","dataSource":"wikipedia","granularity":"ALL","dimension":"robot","metric":"$f1","aggregations":[{"type":"longMax","name":"$f1","fieldName":"added"},{"type":"doubleSum","name":"$f2","fieldName":"variation"}],"intervals":["1900-01-01T00:00:00.000-08:00/3000-01-01T00:00:00.000-08:00"],"threshold":100}
+            druid.query.type topN
+          Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+          Select Operator
+            expressions: robot (type: string), $f1 (type: bigint), $f2 (type: float)
+            outputColumnNames: _col0, _col1, _col2
+            Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+            ListSink
+
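[A GROUP BY on one dimension plus ORDER BY an aggregate DESC plus LIMIT collapses into a Druid topN: "dimension" is the grouping column, "metric" names the aggregate to rank by, and "threshold" carries the LIMIT. A minimal sketch mirroring the JSON above (the broker URL and the requests client are assumptions, not part of this patch):

    import requests  # assumed: third-party HTTP client

    query = {
        "queryType": "topN",
        "dataSource": "wikipedia",
        "granularity": "ALL",
        "dimension": "robot",
        "metric": "$f1",    # rank by the longMax(added) aggregate
        "threshold": 100,   # LIMIT 100
        "aggregations": [
            {"type": "longMax", "name": "$f1", "fieldName": "added"},
            {"type": "doubleSum", "name": "$f2", "fieldName": "variation"},
        ],
        "intervals": ["1900-01-01T00:00:00.000-08:00/3000-01-01T00:00:00.000-08:00"],
    }
    rows = requests.post("http://localhost:8082/druid/v2/", json=query).json()
]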
+PREHOOK: query: -- GRANULARITY: NONE
+EXPLAIN
+SELECT robot, `__time`, max(added), sum(variation) as s
+FROM druid_table_1
+GROUP BY robot, `__time`
+ORDER BY s DESC
+LIMIT 100
+PREHOOK: type: QUERY
+POSTHOOK: query: -- GRANULARITY: NONE
+EXPLAIN
+SELECT robot, `__time`, max(added), sum(variation) as s
+FROM druid_table_1
+GROUP BY robot, `__time`
+ORDER BY s DESC
+LIMIT 100
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        TableScan
+          alias: druid_table_1
+          properties:
+            druid.query.json {"queryType":"topN","dataSource":"wikipedia","granularity":"NONE","dimension":"robot","metric":"$f3","aggregations":[{"type":"longMax","name":"$f2","fieldName":"added"},{"type":"doubleSum","name":"$f3","fieldName":"variation"}],"intervals":["1900-01-01T00:00:00.000-08:00/3000-01-01T00:00:00.000-08:00"],"threshold":100}
+            druid.query.type topN
+          Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+          Select Operator
+            expressions: robot (type: string), __time (type: timestamp), $f2 (type: bigint), $f3 (type: float)
+            outputColumnNames: _col0, _col1, _col2, _col3
+            Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+            ListSink
+
+PREHOOK: query: -- GRANULARITY: YEAR
+EXPLAIN
+SELECT robot, floor_year(`__time`), max(added), sum(variation) as s
+FROM druid_table_1
+GROUP BY robot, floor_year(`__time`)
+ORDER BY s DESC
+LIMIT 10
+PREHOOK: type: QUERY
+POSTHOOK: query: -- GRANULARITY: YEAR
+EXPLAIN
+SELECT robot, floor_year(`__time`), max(added), sum(variation) as s
+FROM druid_table_1
+GROUP BY robot, floor_year(`__time`)
+ORDER BY s DESC
+LIMIT 10
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        TableScan
+          alias: druid_table_1
+          properties:
+            druid.query.json {"queryType":"topN","dataSource":"wikipedia","granularity":"YEAR","dimension":"robot","metric":"$f3","aggregations":[{"type":"longMax","name":"$f2","fieldName":"added"},{"type":"doubleSum","name":"$f3","fieldName":"variation"}],"intervals":["1900-01-01T00:00:00.000-08:00/3000-01-01T00:00:00.000-08:00"],"threshold":10}
+            druid.query.type topN
+          Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+          Select Operator
+            expressions: robot (type: string), __time (type: timestamp), $f2 (type: bigint), $f3 (type: float)
+            outputColumnNames: _col0, _col1, _col2, _col3
+            Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+            ListSink
+
+PREHOOK: query: -- ASC: TRANSFORM INTO GROUP BY
+EXPLAIN
+SELECT robot, floor_month(`__time`), max(added), sum(variation) as s
+FROM druid_table_1
+GROUP BY robot, floor_month(`__time`)
+ORDER BY s
+LIMIT 10
+PREHOOK: type: QUERY
+POSTHOOK: query: -- ASC: TRANSFORM INTO GROUP BY
+EXPLAIN
+SELECT robot, floor_month(`__time`), max(added), sum(variation) as s
+FROM druid_table_1
+GROUP BY robot, floor_month(`__time`)
+ORDER BY s
+LIMIT 10
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        TableScan
+          alias: druid_table_1
+          properties:
+            druid.query.json {"queryType":"groupBy","dataSource":"wikipedia","granularity":"MONTH","dimensions":["robot"],"limitSpec":{"type":"default","limit":10,"columns":[{"dimension":"$f3","direction":"ascending"}]},"aggregations":[{"type":"longMax","name":"$f2","fieldName":"added"},{"type":"doubleSum","name":"$f3","fieldName":"variation"}],"intervals":["1900-01-01T00:00:00.000-08:00/3000-01-01T00:00:00.000-08:00"]}
+            druid.query.type groupBy
+          Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+          Select Operator
+            expressions: robot (type: string), __time (type: timestamp), $f2 (type: bigint), $f3 (type: float)
+            outputColumnNames: _col0, _col1, _col2, _col3
+            Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+            ListSink
+
+PREHOOK: query: -- MULTIPLE ORDER: TRANSFORM INTO GROUP BY
+EXPLAIN
+SELECT robot, floor_month(`__time`), max(added) as m, sum(variation) as s
+FROM druid_table_1
+GROUP BY robot, namespace, floor_month(`__time`)
+ORDER BY s DESC, m DESC
+LIMIT 10
+PREHOOK: type: QUERY
+POSTHOOK: query: -- MULTIPLE ORDER: TRANSFORM INTO GROUP BY
+EXPLAIN
+SELECT robot, floor_month(`__time`), max(added) as m, sum(variation) as s
+FROM druid_table_1
+GROUP BY robot, namespace, floor_month(`__time`)
+ORDER BY s DESC, m DESC
+LIMIT 10
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        TableScan
+          alias: druid_table_1
+          properties:
+            druid.query.json {"queryType":"groupBy","dataSource":"wikipedia","granularity":"MONTH","dimensions":["robot","namespace"],"limitSpec":{"type":"default","limit":10,"columns":[{"dimension":"$f4","direction":"descending"},{"dimension":"$f3","direction":"descending"}]},"aggregations":[{"type":"longMax","name":"$f3","fieldName":"added"},{"type":"doubleSum","name":"$f4","fieldName":"variation"}],"intervals":["1900-01-01T00:00:00.000-08:00/3000-01-01T00:00:00.000-08:00"]}
+            druid.query.type groupBy
+          Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+          Select Operator
+            expressions: robot (type: string), __time (type: timestamp), $f3 (type: bigint), $f4 (type: float)
+            outputColumnNames: _col0, _col1, _col2, _col3
+            Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+            ListSink
+
+PREHOOK: query: -- MULTIPLE ORDER MIXED: TRANSFORM INTO GROUP BY
+EXPLAIN
+SELECT robot, floor_month(`__time`), max(added) as m, sum(variation) as s
+FROM druid_table_1
+GROUP BY robot, namespace, floor_month(`__time`)
+ORDER BY robot ASC, m DESC
+LIMIT 10
+PREHOOK: type: QUERY
+POSTHOOK: query: -- MULTIPLE ORDER MIXED: TRANSFORM INTO GROUP BY
+EXPLAIN
+SELECT robot, floor_month(`__time`), max(added) as m, sum(variation) as s
+FROM druid_table_1
+GROUP BY robot, namespace, floor_month(`__time`)
+ORDER BY robot ASC, m DESC
+LIMIT 10
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        TableScan
+          alias: druid_table_1
+          properties:
+            druid.query.json {"queryType":"groupBy","dataSource":"wikipedia","granularity":"MONTH","dimensions":["robot","namespace"],"limitSpec":{"type":"default","limit":10,"columns":[{"dimension":"robot","direction":"ascending"},{"dimension":"$f3","direction":"descending"}]},"aggregations":[{"type":"longMax","name":"$f3","fieldName":"added"},{"type":"doubleSum","name":"$f4","fieldName":"variation"}],"intervals":["1900-01-01T00:00:00.000-08:00/3000-01-01T00:00:00.000-08:00"]}
+            druid.query.type groupBy
+          Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+          Select Operator
+            expressions: robot (type: string), __time (type: timestamp), $f3 (type: bigint), $f4 (type: float)
+            outputColumnNames: _col0, _col1, _col2, _col3
+            Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+            ListSink
+
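[The three plans above fall back from topN to groupBy because Druid's topN ranks a single dimension descending by one metric; ASC ordering, multiple sort keys, or mixed directions are expressed through a groupBy limitSpec instead. A standalone sketch of the limitSpec for ORDER BY robot ASC, m DESC LIMIT 10, restating the last plan:

    # Not part of the patch; mirrors the limitSpec in the plan above.
    limit_spec = {
        "type": "default",
        "limit": 10,
        "columns": [
            {"dimension": "robot", "direction": "ascending"},
            {"dimension": "$f3", "direction": "descending"},  # max(added)
        ],
    }
]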
+PREHOOK: query: -- WITH FILTER ON DIMENSION: TRANSFORM INTO GROUP BY
+EXPLAIN
+SELECT robot, floor_year(`__time`), max(added), sum(variation) as s
+FROM druid_table_1
+WHERE robot='1'
+GROUP BY robot, floor_year(`__time`)
+ORDER BY s
+LIMIT 10
+PREHOOK: type: QUERY
+POSTHOOK: query: -- WITH FILTER ON DIMENSION: TRANSFORM INTO GROUP BY
+EXPLAIN
+SELECT robot, floor_year(`__time`), max(added), sum(variation) as s
+FROM druid_table_1
+WHERE robot='1'
+GROUP BY robot, floor_year(`__time`)
+ORDER BY s
+LIMIT 10
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        TableScan
+          alias: druid_table_1
+          properties:
+            druid.query.json {"queryType":"groupBy","dataSource":"wikipedia","granularity":"YEAR","dimensions":[],"limitSpec":{"type":"default","limit":10,"columns":[{"dimension":"$f2","direction":"ascending"}]},"filter":{"type":"selector","dimension":"robot","value":"1"},"aggregations":[{"type":"longMax","name":"$f1_0","fieldName":"added"},{"type":"doubleSum","name":"$f2","fieldName":"variation"}],"intervals":["1900-01-01T00:00:00.000-08:00/3000-01-01T00:00:00.000-08:00"]}
+            druid.query.type groupBy
+          Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+          Select Operator
+            expressions: '1' (type: string), __time (type: timestamp), $f1_0 (type: bigint), $f2 (type: float)
+            outputColumnNames: _col0, _col1, _col2, _col3
+            Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+            ListSink
+
+PREHOOK: query: -- WITH FILTER ON TIME
+EXPLAIN
+SELECT robot, floor_hour(`__time`), max(added) as m, sum(variation)
+FROM druid_table_1
+WHERE floor_hour(`__time`)
+    BETWEEN CAST('2010-01-01 00:00:00' AS TIMESTAMP)
+        AND CAST('2014-01-01 00:00:00' AS TIMESTAMP)
+GROUP BY robot, floor_hour(`__time`)
+ORDER BY m
+LIMIT 100
+PREHOOK: type: QUERY
+POSTHOOK: query: -- WITH FILTER ON TIME
+EXPLAIN
+SELECT robot, floor_hour(`__time`), max(added) as m, sum(variation)
+FROM druid_table_1
+WHERE floor_hour(`__time`)
+    BETWEEN CAST('2010-01-01 00:00:00' AS TIMESTAMP)
+        AND CAST('2014-01-01 00:00:00' AS TIMESTAMP)
+GROUP BY robot, floor_hour(`__time`)
+ORDER BY m
+LIMIT 100
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-2 depends on stages: Stage-1
+  Stage-0 depends on stages: Stage-2
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: druid_table_1
+            filterExpr: floor_hour(__time) BETWEEN 2010-01-01 00:00:00.0 AND 2014-01-01 00:00:00.0 (type: boolean)
+            properties:
+              druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":"false","intervals":["1900-01-01T00:00:00.000-08:00/3000-01-01T00:00:00.000-08:00"],"dimensions":["robot","namespace","anonymous","unpatrolled","page","language","newpage","user"],"metrics":["count","added","delta","variation","deleted"],"granularity":"ALL","pagingSpec":{"threshold":1},"context":{"druid.query.fetch":false}}
+              druid.query.type select
+            Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+            Filter Operator
+              predicate: floor_hour(__time) BETWEEN 2010-01-01 00:00:00.0 AND 2014-01-01 00:00:00.0 (type: boolean)
+              Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+              Select Operator
+                expressions: robot (type: string), floor_hour(__time) (type: timestamp), added (type: float), variation (type: float)
+                outputColumnNames: _col0, _col1, _col2, _col3
+                Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+                Group By Operator
+                  aggregations: max(_col2), sum(_col3)
+                  keys: _col0 (type: string), _col1 (type: timestamp)
+                  mode: hash
+                  outputColumnNames: _col0, _col1, _col2, _col3
+                  Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+                  Reduce Output Operator
+                    key expressions: _col0 (type: string), _col1 (type: timestamp)
+                    sort order: ++
+                    Map-reduce partition columns: _col0 (type: string), _col1 (type: timestamp)
+                    Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+                    value expressions: _col2 (type: float), _col3 (type: double)
+      Reduce Operator Tree:
+        Group By Operator
+          aggregations: max(VALUE._col0), sum(VALUE._col1)
+          keys: KEY._col0 (type: string), KEY._col1 (type: timestamp)
+          mode: mergepartial
+          outputColumnNames: _col0, _col1, _col2, _col3
+          Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+          File Output Operator
+            compressed: false
+            table:
+                input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+
+  Stage: Stage-2
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            Reduce Output Operator
+              key expressions: _col2 (type: float)
+              sort order: +
+              Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+              TopN Hash Memory Usage: 0.1
+              value expressions: _col0 (type: string), _col1 (type: timestamp), _col3 (type: double)
+      Reduce Operator Tree:
+        Select Operator
+          expressions: VALUE._col0 (type: string), VALUE._col1 (type: timestamp), KEY.reducesinkkey0 (type: float), VALUE._col2 (type: double)
+          outputColumnNames: _col0, _col1, _col2, _col3
+          Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+          Limit
+            Number of rows: 100
+            Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+            File Output Operator
+              compressed: false
+              Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+              table:
+                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: 100
+      Processor Tree:
+        ListSink
+