Posted to commits@hive.apache.org by ha...@apache.org on 2018/03/24 17:32:42 UTC

[5/9] hive git commit: HIVE-18780 : Improve schema discovery For Druid Storage Handler (Slim Bouguerra via Ashutosh Chauhan) HIVE-18993 : Use Druid Expressions HIVE-14518 : Support 'having' translation for Druid GroupBy queries HIVE-18957 : Upgrade Calcite

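The expected-output changes below follow a common pattern: every Druid-backed TableScan now advertises druid.fieldNames and druid.fieldTypes (paired positionally, so the storage handler can map Druid result columns to Hive types without re-deriving the schema), and simple projections are pushed down as Druid "scan" queries instead of paged "select" queries. A minimal before/after sketch of the pushed-down query, abridged from the first rewritten scan hunk in druidmini_test1.q.out below; both payloads are copied from the hunks that follow, with only whitespace added here for readability:

  Before (select query, paged):
    {"queryType":"select","dataSource":"default.druid_table","descending":false,
     "intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"],
     "dimensions":[],"metrics":[],"granularity":"all",
     "pagingSpec":{"threshold":16384,"fromNext":true},
     "context":{"druid.query.fetch":false}}

  After (scan query; __time is projected through a virtual column):
    {"queryType":"scan","dataSource":"default.druid_table",
     "intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"],
     "virtualColumns":[{"type":"expression","name":"vc",
                        "expression":"\"__time\"","outputType":"LONG"}],
     "columns":["vc"],"resultFormat":"compactedList"}
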
http://git-wip-us.apache.org/repos/asf/hive/blob/696affa2/ql/src/test/results/clientpositive/druid/druidmini_test1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/druid/druidmini_test1.q.out b/ql/src/test/results/clientpositive/druid/druidmini_test1.q.out
index aa68f48..34cccef 100644
--- a/ql/src/test/results/clientpositive/druid/druidmini_test1.q.out
+++ b/ql/src/test/results/clientpositive/druid/druidmini_test1.q.out
@@ -64,7 +64,9 @@ STAGE PLANS:
         TableScan
           alias: druid_table
           properties:
-            druid.query.json {"queryType":"timeseries","dataSource":"default.druid_table","descending":false,"granularity":"all","aggregations":[{"type":"count","name":"$f0"}],"intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"],"context":{"skipEmptyBuckets":true}}
+            druid.fieldNames $f0
+            druid.fieldTypes bigint
+            druid.query.json {"queryType":"timeseries","dataSource":"default.druid_table","descending":false,"granularity":"all","aggregations":[{"type":"count","name":"$f0"}],"intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"],"context":{"skipEmptyBuckets":false}}
             druid.query.type timeseries
           Select Operator
             expressions: $f0 (type: bigint)
@@ -97,10 +99,12 @@ STAGE PLANS:
         TableScan
           alias: druid_table
           properties:
-            druid.query.json {"queryType":"timeseries","dataSource":"default.druid_table","descending":false,"granularity":"year","aggregations":[{"type":"doubleSum","name":"$f1","fieldName":"cfloat"},{"type":"doubleSum","name":"$f2","fieldName":"cdouble"},{"type":"longSum","name":"$f3","fieldName":"ctinyint"},{"type":"longSum","name":"$f4","fieldName":"csmallint"},{"type":"longSum","name":"$f5","fieldName":"cint"},{"type":"longSum","name":"$f6","fieldName":"cbigint"}],"intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"],"context":{"skipEmptyBuckets":true}}
+            druid.fieldNames timestamp,$f1,$f2,$f3,$f4,$f5,$f6
+            druid.fieldTypes timestamp with local time zone,double,double,bigint,bigint,bigint,bigint
+            druid.query.json {"queryType":"timeseries","dataSource":"default.druid_table","descending":false,"granularity":{"type":"period","period":"P1Y","timeZone":"US/Pacific"},"aggregations":[{"type":"doubleSum","name":"$f1","fieldName":"cfloat"},{"type":"doubleSum","name":"$f2","fieldName":"cdouble"},{"type":"longSum","name":"$f3","fieldName":"ctinyint"},{"type":"longSum","name":"$f4","fieldName":"csmallint"},{"type":"longSum","name":"$f5","fieldName":"cint"},{"type":"longSum","name":"$f6","fieldName":"cbigint"}],"intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"],"context":{"skipEmptyBuckets":true}}
             druid.query.type timeseries
           Select Operator
-            expressions: __time (type: timestamp with local time zone), $f1 (type: float), $f2 (type: float), $f3 (type: bigint), $f4 (type: bigint), $f5 (type: bigint), $f6 (type: bigint)
+            expressions: timestamp (type: timestamp with local time zone), $f1 (type: double), $f2 (type: double), $f3 (type: bigint), $f4 (type: bigint), $f5 (type: bigint), $f6 (type: bigint)
             outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6
             ListSink
 
@@ -114,8 +118,7 @@ FROM druid_table GROUP BY floor_year(`__time`)
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@druid_table
 POSTHOOK: Output: hdfs://### HDFS PATH ###
-1968-12-31 16:00:00.0 US/Pacific	-4532.57	3660538.8	-4611	3658030	688783835691	8060200254
-1969-12-31 16:00:00.0 US/Pacific	-35057.676	2.3648124E7	-35356	4123059	719285966109	2932345033
+1969-01-01 00:00:00.0 US/Pacific	-39590.24694168568	2.7308662809692383E7	-39967	7781089	1408069801800	10992545287
 PREHOOK: query: EXPLAIN SELECT floor_year(`__time`), MIN(cfloat), MIN(cdouble), MIN(ctinyint), MIN(csmallint),MIN(cint), MIN(cbigint)
 FROM druid_table GROUP BY floor_year(`__time`)
 PREHOOK: type: QUERY
@@ -133,10 +136,12 @@ STAGE PLANS:
         TableScan
           alias: druid_table
           properties:
-            druid.query.json {"queryType":"timeseries","dataSource":"default.druid_table","descending":false,"granularity":"year","aggregations":[{"type":"doubleMin","name":"$f1","fieldName":"cfloat"},{"type":"doubleMin","name":"$f2","fieldName":"cdouble"},{"type":"longMin","name":"$f3","fieldName":"ctinyint"},{"type":"longMin","name":"$f4","fieldName":"csmallint"},{"type":"longMin","name":"$f5","fieldName":"cint"},{"type":"longMin","name":"$f6","fieldName":"cbigint"}],"intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"],"context":{"skipEmptyBuckets":true}}
+            druid.fieldNames timestamp,$f1,$f2,$f3,$f4,$f5,$f6
+            druid.fieldTypes timestamp with local time zone,float,double,tinyint,smallint,int,bigint
+            druid.query.json {"queryType":"timeseries","dataSource":"default.druid_table","descending":false,"granularity":{"type":"period","period":"P1Y","timeZone":"US/Pacific"},"aggregations":[{"type":"doubleMin","name":"$f1","fieldName":"cfloat"},{"type":"doubleMin","name":"$f2","fieldName":"cdouble"},{"type":"longMin","name":"$f3","fieldName":"ctinyint"},{"type":"longMin","name":"$f4","fieldName":"csmallint"},{"type":"longMin","name":"$f5","fieldName":"cint"},{"type":"longMin","name":"$f6","fieldName":"cbigint"}],"intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"],"context":{"skipEmptyBuckets":true}}
             druid.query.type timeseries
           Select Operator
-            expressions: __time (type: timestamp with local time zone), $f1 (type: float), $f2 (type: float), $f3 (type: bigint), $f4 (type: bigint), $f5 (type: bigint), $f6 (type: bigint)
+            expressions: timestamp (type: timestamp with local time zone), $f1 (type: float), $f2 (type: double), $f3 (type: tinyint), $f4 (type: smallint), $f5 (type: int), $f6 (type: bigint)
             outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6
             ListSink
 
@@ -150,8 +155,7 @@ FROM druid_table GROUP BY floor_year(`__time`)
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@druid_table
 POSTHOOK: Output: hdfs://### HDFS PATH ###
-1968-12-31 16:00:00.0 US/Pacific	-1790.778	-308691.84	-1790	-313425	0	-8577981133
-1969-12-31 16:00:00.0 US/Pacific	-964.719	-287404.84	-1051	-292138	-1073279343	-2147311592
+1969-01-01 00:00:00.0 US/Pacific	-1790.778	-308691.84375	2	14255	-1073279343	-8577981133
 PREHOOK: query: EXPLAIN SELECT floor_year(`__time`), MAX(cfloat), MAX(cdouble), MAX(ctinyint), MAX(csmallint),MAX(cint), MAX(cbigint)
 FROM druid_table GROUP BY floor_year(`__time`)
 PREHOOK: type: QUERY
@@ -169,10 +173,12 @@ STAGE PLANS:
         TableScan
           alias: druid_table
           properties:
-            druid.query.json {"queryType":"timeseries","dataSource":"default.druid_table","descending":false,"granularity":"year","aggregations":[{"type":"doubleMax","name":"$f1","fieldName":"cfloat"},{"type":"doubleMax","name":"$f2","fieldName":"cdouble"},{"type":"longMax","name":"$f3","fieldName":"ctinyint"},{"type":"longMax","name":"$f4","fieldName":"csmallint"},{"type":"longMax","name":"$f5","fieldName":"cint"},{"type":"longMax","name":"$f6","fieldName":"cbigint"}],"intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"],"context":{"skipEmptyBuckets":true}}
+            druid.fieldNames timestamp,$f1,$f2,$f3,$f4,$f5,$f6
+            druid.fieldTypes timestamp with local time zone,float,double,tinyint,smallint,int,bigint
+            druid.query.json {"queryType":"timeseries","dataSource":"default.druid_table","descending":false,"granularity":{"type":"period","period":"P1Y","timeZone":"US/Pacific"},"aggregations":[{"type":"doubleMax","name":"$f1","fieldName":"cfloat"},{"type":"doubleMax","name":"$f2","fieldName":"cdouble"},{"type":"longMax","name":"$f3","fieldName":"ctinyint"},{"type":"longMax","name":"$f4","fieldName":"csmallint"},{"type":"longMax","name":"$f5","fieldName":"cint"},{"type":"longMax","name":"$f6","fieldName":"cbigint"}],"intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"],"context":{"skipEmptyBuckets":true}}
             druid.query.type timeseries
           Select Operator
-            expressions: __time (type: timestamp with local time zone), $f1 (type: float), $f2 (type: float), $f3 (type: bigint), $f4 (type: bigint), $f5 (type: bigint), $f6 (type: bigint)
+            expressions: timestamp (type: timestamp with local time zone), $f1 (type: float), $f2 (type: double), $f3 (type: tinyint), $f4 (type: smallint), $f5 (type: int), $f6 (type: bigint)
             outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6
             ListSink
 
@@ -186,8 +192,7 @@ FROM druid_table GROUP BY floor_year(`__time`)
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@druid_table
 POSTHOOK: Output: hdfs://### HDFS PATH ###
-1968-12-31 16:00:00.0 US/Pacific	62.0	57235.0	62	57235	314088763179	2144274348
-1969-12-31 16:00:00.0 US/Pacific	769.164	1.9565518E7	723	57435	319104152611	4923772860
+1969-01-01 00:00:00.0 US/Pacific	769.164	1.9565518E7	-45	-8101	1276572707	4923772860
 PREHOOK: query: EXPLAIN SELECT cstring1, SUM(cdouble) as s FROM druid_table GROUP BY cstring1 ORDER BY s ASC LIMIT 10
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN SELECT cstring1, SUM(cdouble) as s FROM druid_table GROUP BY cstring1 ORDER BY s ASC LIMIT 10
@@ -203,10 +208,12 @@ STAGE PLANS:
         TableScan
           alias: druid_table
           properties:
-            druid.query.json {"queryType":"groupBy","dataSource":"default.druid_table","granularity":"all","dimensions":[{"type":"default","dimension":"cstring1"}],"limitSpec":{"type":"default","limit":10,"columns":[{"dimension":"$f1","direction":"ascending","dimensionOrder":"numeric"}]},"aggregations":[{"type":"doubleSum","name":"$f1","fieldName":"cdouble"}],"intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"]}
+            druid.fieldNames cstring1,$f1
+            druid.fieldTypes string,double
+            druid.query.json {"queryType":"groupBy","dataSource":"default.druid_table","granularity":"all","dimensions":[{"type":"default","dimension":"cstring1","outputName":"cstring1","outputType":"STRING"}],"limitSpec":{"type":"default","limit":10,"columns":[{"dimension":"$f1","direction":"ascending","dimensionOrder":"numeric"}]},"aggregations":[{"type":"doubleSum","name":"$f1","fieldName":"cdouble"}],"intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"]}
             druid.query.type groupBy
           Select Operator
-            expressions: cstring1 (type: string), $f1 (type: float)
+            expressions: cstring1 (type: string), $f1 (type: double)
             outputColumnNames: _col0, _col1
             ListSink
 
@@ -218,8 +225,8 @@ POSTHOOK: query: SELECT cstring1, SUM(cdouble) as s FROM druid_table GROUP BY cs
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@druid_table
 POSTHOOK: Output: hdfs://### HDFS PATH ###
-1cGVWH7n1QU	-596096.7
-821UdmGbkEf4j	-14161.827
+1cGVWH7n1QU	-596096.6875
+821UdmGbkEf4j	-14161.827026367188
 00iT08	0.0
 02v8WnLuYDos3Cq	0.0
 yv1js	0.0
@@ -243,10 +250,12 @@ STAGE PLANS:
         TableScan
           alias: druid_table
           properties:
-            druid.query.json {"queryType":"groupBy","dataSource":"default.druid_table","granularity":"all","dimensions":[{"type":"default","dimension":"cstring2"}],"limitSpec":{"type":"default","limit":10,"columns":[{"dimension":"cstring2","direction":"ascending","dimensionOrder":"alphanumeric"}]},"aggregations":[{"type":"doubleMax","name":"$f1","fieldName":"cdouble"}],"intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"]}
+            druid.fieldNames cstring2,$f1
+            druid.fieldTypes string,double
+            druid.query.json {"queryType":"groupBy","dataSource":"default.druid_table","granularity":"all","dimensions":[{"type":"default","dimension":"cstring2","outputName":"cstring2","outputType":"STRING"}],"limitSpec":{"type":"default","limit":10,"columns":[{"dimension":"cstring2","direction":"ascending","dimensionOrder":"lexicographic"}]},"aggregations":[{"type":"doubleMax","name":"$f1","fieldName":"cdouble"}],"intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"]}
             druid.query.type groupBy
           Select Operator
-            expressions: cstring2 (type: string), $f1 (type: float)
+            expressions: cstring2 (type: string), $f1 (type: double)
             outputColumnNames: _col0, _col1
             ListSink
 
@@ -259,15 +268,15 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@druid_table
 POSTHOOK: Output: hdfs://### HDFS PATH ###
 NULL	1.9565518E7
-0AAE3daA78MISbsRsHJrp2PI	0.0
-0amu3m60U20Xa3	-200.0
-0aO3Lwer	0.0
-0aQBRP67JY0gpi	15601.0
-0b1WvXy	0.0
-0b03cuG3B4ASx4es1411336I	-7196.0
-0B5S310g	0.0
-0bffMd8KSbW32A8A5	0.0
-0bke07kBhD1s33AV3R1X7j7j	0.0
+0034fkcXMQI3	15601.0
+004J8y	0.0
+00GNm	-200.0
+00GW4dnb6Wgj52	-200.0
+00PBhB1Iefgk	0.0
+00d5kr1wEB7evExG	15601.0
+00qccwt8n	0.0
+017fFeQ3Gcsa83Xj2Vo0	0.0
+01EfkvNk6mjG44uxs	0.0
 PREHOOK: query: EXPLAIN
 SELECT `__time`
 FROM druid_table ORDER BY `__time` ASC LIMIT 10
@@ -293,11 +302,13 @@ STAGE PLANS:
                 TableScan
                   alias: druid_table
                   properties:
-                    druid.query.json {"queryType":"select","dataSource":"default.druid_table","descending":false,"intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"],"dimensions":[],"metrics":[],"granularity":"all","pagingSpec":{"threshold":16384,"fromNext":true},"context":{"druid.query.fetch":false}}
-                    druid.query.type select
+                    druid.fieldNames vc
+                    druid.fieldTypes timestamp with local time zone
+                    druid.query.json {"queryType":"scan","dataSource":"default.druid_table","intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"],"virtualColumns":[{"type":"expression","name":"vc","expression":"\"__time\"","outputType":"LONG"}],"columns":["vc"],"resultFormat":"compactedList"}
+                    druid.query.type scan
                   Statistics: Num rows: 9173 Data size: 348640 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
-                    expressions: __time (type: timestamp with local time zone)
+                    expressions: vc (type: timestamp with local time zone)
                     outputColumnNames: _col0
                     Statistics: Num rows: 9173 Data size: 348640 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
@@ -375,11 +386,13 @@ STAGE PLANS:
                 TableScan
                   alias: druid_table
                   properties:
-                    druid.query.json {"queryType":"select","dataSource":"default.druid_table","descending":false,"intervals":["1900-01-01T00:00:00.000Z/1970-03-01T08:00:00.000Z"],"dimensions":[],"metrics":[],"granularity":"all","pagingSpec":{"threshold":16384,"fromNext":true},"context":{"druid.query.fetch":false}}
-                    druid.query.type select
+                    druid.fieldNames vc
+                    druid.fieldTypes timestamp with local time zone
+                    druid.query.json {"queryType":"scan","dataSource":"default.druid_table","intervals":["1900-01-01T00:00:00.000Z/1970-03-01T08:00:00.000Z"],"virtualColumns":[{"type":"expression","name":"vc","expression":"\"__time\"","outputType":"LONG"}],"columns":["vc"],"resultFormat":"compactedList"}
+                    druid.query.type scan
                   Statistics: Num rows: 9173 Data size: 348640 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
-                    expressions: __time (type: timestamp with local time zone)
+                    expressions: vc (type: timestamp with local time zone)
                     outputColumnNames: _col0
                     Statistics: Num rows: 9173 Data size: 348640 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
@@ -459,11 +472,13 @@ STAGE PLANS:
                 TableScan
                   alias: druid_table
                   properties:
-                    druid.query.json {"queryType":"select","dataSource":"default.druid_table","descending":false,"intervals":["1968-01-01T08:00:00.000Z/1970-03-01T08:00:00.001Z"],"dimensions":[],"metrics":[],"granularity":"all","pagingSpec":{"threshold":16384,"fromNext":true},"context":{"druid.query.fetch":false}}
-                    druid.query.type select
+                    druid.fieldNames vc
+                    druid.fieldTypes timestamp with local time zone
+                    druid.query.json {"queryType":"scan","dataSource":"default.druid_table","intervals":["1968-01-01T08:00:00.000Z/1970-03-01T08:00:00.001Z"],"virtualColumns":[{"type":"expression","name":"vc","expression":"\"__time\"","outputType":"LONG"}],"columns":["vc"],"resultFormat":"compactedList"}
+                    druid.query.type scan
                   Statistics: Num rows: 9173 Data size: 348640 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
-                    expressions: __time (type: timestamp with local time zone)
+                    expressions: vc (type: timestamp with local time zone)
                     outputColumnNames: _col0
                     Statistics: Num rows: 9173 Data size: 348640 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
@@ -545,11 +560,13 @@ STAGE PLANS:
                 TableScan
                   alias: druid_table
                   properties:
-                    druid.query.json {"queryType":"select","dataSource":"default.druid_table","descending":false,"intervals":["1968-01-01T08:00:00.000Z/1970-03-01T08:00:00.001Z"],"dimensions":[],"metrics":[],"granularity":"all","pagingSpec":{"threshold":16384,"fromNext":true},"context":{"druid.query.fetch":false}}
-                    druid.query.type select
+                    druid.fieldNames vc
+                    druid.fieldTypes timestamp with local time zone
+                    druid.query.json {"queryType":"scan","dataSource":"default.druid_table","intervals":["1968-01-01T08:00:00.000Z/1970-03-01T08:00:00.001Z"],"virtualColumns":[{"type":"expression","name":"vc","expression":"\"__time\"","outputType":"LONG"}],"columns":["vc"],"resultFormat":"compactedList"}
+                    druid.query.type scan
                   Statistics: Num rows: 9173 Data size: 348640 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
-                    expressions: __time (type: timestamp with local time zone)
+                    expressions: vc (type: timestamp with local time zone)
                     outputColumnNames: _col0
                     Statistics: Num rows: 9173 Data size: 348640 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
@@ -631,11 +648,13 @@ STAGE PLANS:
                 TableScan
                   alias: druid_table
                   properties:
-                    druid.query.json {"queryType":"select","dataSource":"default.druid_table","descending":false,"intervals":["1968-01-01T08:00:00.000Z/1970-01-01T08:00:00.001Z"],"dimensions":[],"metrics":[],"granularity":"all","pagingSpec":{"threshold":16384,"fromNext":true},"context":{"druid.query.fetch":false}}
-                    druid.query.type select
+                    druid.fieldNames vc
+                    druid.fieldTypes timestamp with local time zone
+                    druid.query.json {"queryType":"scan","dataSource":"default.druid_table","intervals":["1968-01-01T08:00:00.000Z/1970-01-01T08:00:00.001Z"],"virtualColumns":[{"type":"expression","name":"vc","expression":"\"__time\"","outputType":"LONG"}],"columns":["vc"],"resultFormat":"compactedList"}
+                    druid.query.type scan
                   Statistics: Num rows: 9173 Data size: 348640 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
-                    expressions: __time (type: timestamp with local time zone)
+                    expressions: vc (type: timestamp with local time zone)
                     outputColumnNames: _col0
                     Statistics: Num rows: 9173 Data size: 348640 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
@@ -717,11 +736,13 @@ STAGE PLANS:
                 TableScan
                   alias: druid_table
                   properties:
-                    druid.query.json {"queryType":"select","dataSource":"default.druid_table","descending":false,"intervals":["1968-01-01T08:00:00.000Z/1970-04-01T08:00:00.001Z"],"dimensions":[],"metrics":[],"granularity":"all","pagingSpec":{"threshold":16384,"fromNext":true},"context":{"druid.query.fetch":false}}
-                    druid.query.type select
+                    druid.fieldNames vc
+                    druid.fieldTypes timestamp with local time zone
+                    druid.query.json {"queryType":"scan","dataSource":"default.druid_table","intervals":["1968-01-01T08:00:00.000Z/1970-04-01T08:00:00.001Z"],"virtualColumns":[{"type":"expression","name":"vc","expression":"\"__time\"","outputType":"LONG"}],"columns":["vc"],"resultFormat":"compactedList"}
+                    druid.query.type scan
                   Statistics: Num rows: 9173 Data size: 348640 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
-                    expressions: __time (type: timestamp with local time zone)
+                    expressions: vc (type: timestamp with local time zone)
                     outputColumnNames: _col0
                     Statistics: Num rows: 9173 Data size: 348640 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator

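The druid_basic2.q.out changes that follow extend the same rewrite to groupBy queries: dimension specs gain explicit outputName/outputType fields, the placeholder dummy_agg aggregator is dropped in favor of an empty aggregations list, string ordering is now spelled "lexicographic" rather than "alphanumeric", and named granularities such as "year" become period objects carrying the session time zone. A minimal sketch of the reshaped pieces, assembled from the hunks above and below (all fragments are copied from this patch's hunks; none are new):

  Dimension spec:
    before: {"type":"default","dimension":"robot"}
    after:  {"type":"default","dimension":"robot","outputName":"robot","outputType":"STRING"}

  Granularity:
    before: "granularity":"year"
    after:  "granularity":{"type":"period","period":"P1Y","timeZone":"US/Pacific"}

  Ordering:
    before: {"dimension":"cstring2","direction":"ascending","dimensionOrder":"alphanumeric"}
    after:  {"dimension":"cstring2","direction":"ascending","dimensionOrder":"lexicographic"}
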
http://git-wip-us.apache.org/repos/asf/hive/blob/696affa2/ql/src/test/results/clientpositive/druid_basic2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/druid_basic2.q.out b/ql/src/test/results/clientpositive/druid_basic2.q.out
index eb2b83f..5ed039b 100644
--- a/ql/src/test/results/clientpositive/druid_basic2.q.out
+++ b/ql/src/test/results/clientpositive/druid_basic2.q.out
@@ -76,8 +76,10 @@ STAGE PLANS:
         TableScan
           alias: druid_table_1
           properties:
-            druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":false,"intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"],"dimensions":["robot"],"metrics":[],"granularity":"all","pagingSpec":{"threshold":16384,"fromNext":true},"context":{"druid.query.fetch":false}}
-            druid.query.type select
+            druid.fieldNames robot
+            druid.fieldTypes string
+            druid.query.json {"queryType":"scan","dataSource":"wikipedia","intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"],"columns":["robot"],"resultFormat":"compactedList"}
+            druid.query.type scan
           Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
           GatherStats: false
           Select Operator
@@ -103,8 +105,10 @@ STAGE PLANS:
         TableScan
           alias: druid_table_1
           properties:
-            druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":false,"intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"],"dimensions":[],"metrics":["delta"],"granularity":"all","pagingSpec":{"threshold":16384,"fromNext":true},"context":{"druid.query.fetch":false}}
-            druid.query.type select
+            druid.fieldNames delta
+            druid.fieldTypes float
+            druid.query.json {"queryType":"scan","dataSource":"wikipedia","intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"],"columns":["delta"],"resultFormat":"compactedList"}
+            druid.query.type scan
           Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
           GatherStats: false
           Select Operator
@@ -134,8 +138,10 @@ STAGE PLANS:
         TableScan
           alias: druid_table_1
           properties:
-            druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":false,"intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"],"filter":{"type":"selector","dimension":"language","value":"en"},"dimensions":["robot"],"metrics":[],"granularity":"all","pagingSpec":{"threshold":16384,"fromNext":true},"context":{"druid.query.fetch":false}}
-            druid.query.type select
+            druid.fieldNames robot
+            druid.fieldTypes string
+            druid.query.json {"queryType":"scan","dataSource":"wikipedia","intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"],"filter":{"type":"selector","dimension":"language","value":"en"},"columns":["robot"],"resultFormat":"compactedList"}
+            druid.query.type scan
           Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
           GatherStats: false
           Select Operator
@@ -165,7 +171,9 @@ STAGE PLANS:
         TableScan
           alias: druid_table_1
           properties:
-            druid.query.json {"queryType":"groupBy","dataSource":"wikipedia","granularity":"all","dimensions":[{"type":"default","dimension":"robot"}],"limitSpec":{"type":"default"},"filter":{"type":"selector","dimension":"language","value":"en"},"aggregations":[{"type":"longSum","name":"dummy_agg","fieldName":"dummy_agg"}],"intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"]}
+            druid.fieldNames robot
+            druid.fieldTypes string
+            druid.query.json {"queryType":"groupBy","dataSource":"wikipedia","granularity":"all","dimensions":[{"type":"default","dimension":"robot","outputName":"robot","outputType":"STRING"}],"limitSpec":{"type":"default"},"filter":{"type":"selector","dimension":"language","value":"en"},"aggregations":[],"intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"]}
             druid.query.type groupBy
           Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
           GatherStats: false
@@ -210,8 +218,10 @@ STAGE PLANS:
           TableScan
             alias: druid_table_1
             properties:
-              druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":false,"intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"],"filter":{"type":"not","field":{"type":"selector","dimension":"language","value":null}},"dimensions":["robot","language"],"metrics":[],"granularity":"all","pagingSpec":{"threshold":16384,"fromNext":true},"context":{"druid.query.fetch":false}}
-              druid.query.type select
+              druid.fieldNames robot,language
+              druid.fieldTypes string,string
+              druid.query.json {"queryType":"scan","dataSource":"wikipedia","intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"],"filter":{"type":"not","field":{"type":"selector","dimension":"language","value":null}},"columns":["robot","language"],"resultFormat":"compactedList"}
+              druid.query.type scan
             Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
             GatherStats: false
             Select Operator
@@ -230,8 +240,10 @@ STAGE PLANS:
           TableScan
             alias: druid_table_1
             properties:
-              druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":false,"intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"],"filter":{"type":"not","field":{"type":"selector","dimension":"language","value":null}},"dimensions":["language"],"metrics":[],"granularity":"all","pagingSpec":{"threshold":16384,"fromNext":true},"context":{"druid.query.fetch":false}}
-              druid.query.type select
+              druid.fieldNames language
+              druid.fieldTypes string
+              druid.query.json {"queryType":"scan","dataSource":"wikipedia","intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"],"filter":{"type":"not","field":{"type":"selector","dimension":"language","value":null}},"columns":["language"],"resultFormat":"compactedList"}
+              druid.query.type scan
             Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
             GatherStats: false
             Reduce Output Operator
@@ -259,8 +271,10 @@ STAGE PLANS:
               columns.comments 'from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer'
               columns.types timestamp with local time zone:string:string:string:string:string:string:string:string:float:float:float:float:float
               druid.datasource wikipedia
-              druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":false,"intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"],"filter":{"type":"not","field":{"type":"selector","dimension":"language","value":null}},"dimensions":["robot","language"],"metrics":[],"granularity":"all","pagingSpec":{"threshold":16384,"fromNext":true},"context":{"druid.query.fetch":false}}
-              druid.query.type select
+              druid.fieldNames robot,language
+              druid.fieldTypes string,string
+              druid.query.json {"queryType":"scan","dataSource":"wikipedia","intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"],"filter":{"type":"not","field":{"type":"selector","dimension":"language","value":null}},"columns":["robot","language"],"resultFormat":"compactedList"}
+              druid.query.type scan
 #### A masked pattern was here ####
               name default.druid_table_1
               numFiles 0
@@ -285,8 +299,10 @@ STAGE PLANS:
                 columns.comments 'from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer'
                 columns.types timestamp with local time zone:string:string:string:string:string:string:string:string:float:float:float:float:float
                 druid.datasource wikipedia
-                druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":false,"intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"],"filter":{"type":"not","field":{"type":"selector","dimension":"language","value":null}},"dimensions":["language"],"metrics":[],"granularity":"all","pagingSpec":{"threshold":16384,"fromNext":true},"context":{"druid.query.fetch":false}}
-                druid.query.type select
+                druid.fieldNames language
+                druid.fieldTypes string
+                druid.query.json {"queryType":"scan","dataSource":"wikipedia","intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"],"filter":{"type":"not","field":{"type":"selector","dimension":"language","value":null}},"columns":["language"],"resultFormat":"compactedList"}
+                druid.query.type scan
 #### A masked pattern was here ####
                 name default.druid_table_1
                 numFiles 0
@@ -311,10 +327,10 @@ STAGE PLANS:
           keys:
             0 _col1 (type: string)
             1 language (type: string)
-          outputColumnNames: _col0, _col3
+          outputColumnNames: _col0, _col2
           Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
           Select Operator
-            expressions: _col0 (type: string), _col3 (type: string)
+            expressions: _col0 (type: string), _col2 (type: string)
             outputColumnNames: _col0, _col1
             Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
             File Output Operator
@@ -346,7 +362,7 @@ STAGE PLANS:
       Processor Tree:
         ListSink
 
-Warning: Shuffle Join JOIN[5][tables = [druid_table_1, $hdt$_0]] in Stage 'Stage-1:MAPRED' is a cross product
+Warning: Shuffle Join JOIN[5][tables = [$hdt$_0, druid_table_1]] in Stage 'Stage-1:MAPRED' is a cross product
 PREHOOK: query: EXPLAIN EXTENDED
 SELECT a.robot, b.language
 FROM
@@ -384,31 +400,37 @@ STAGE PLANS:
           TableScan
             alias: druid_table_1
             properties:
-              druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":false,"intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"],"filter":{"type":"selector","dimension":"language","value":"en"},"dimensions":[],"metrics":[],"granularity":"all","pagingSpec":{"threshold":16384,"fromNext":true},"context":{"druid.query.fetch":false}}
-              druid.query.type select
-            Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
+              druid.fieldNames robot
+              druid.fieldTypes string
+              druid.query.json {"queryType":"scan","dataSource":"wikipedia","intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"],"filter":{"type":"selector","dimension":"language","value":"en"},"columns":["robot"],"resultFormat":"compactedList"}
+              druid.query.type scan
+            Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
             GatherStats: false
             Select Operator
-              Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
+              expressions: robot (type: string)
+              outputColumnNames: _col0
+              Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
               Reduce Output Operator
                 null sort order: 
                 sort order: 
-                Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
-                tag: 1
+                Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+                tag: 0
+                value expressions: _col0 (type: string)
                 auto parallelism: false
           TableScan
             alias: druid_table_1
             properties:
-              druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":false,"intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"],"filter":{"type":"selector","dimension":"language","value":"en"},"dimensions":["robot"],"metrics":[],"granularity":"all","pagingSpec":{"threshold":16384,"fromNext":true},"context":{"druid.query.fetch":false}}
-              druid.query.type select
-            Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+              druid.fieldNames vc
+              druid.fieldTypes string
+              druid.query.json {"queryType":"scan","dataSource":"wikipedia","intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"],"filter":{"type":"selector","dimension":"language","value":"en"},"virtualColumns":[{"type":"expression","name":"vc","expression":"'en'","outputType":"STRING"}],"columns":["vc"],"resultFormat":"compactedList"}
+              druid.query.type scan
+            Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
             GatherStats: false
             Reduce Output Operator
               null sort order: 
               sort order: 
-              Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
-              tag: 0
-              value expressions: robot (type: string)
+              Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
+              tag: 1
               auto parallelism: false
       Path -> Alias:
 #### A masked pattern was here ####
@@ -427,8 +449,10 @@ STAGE PLANS:
               columns.comments 'from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer'
               columns.types timestamp with local time zone:string:string:string:string:string:string:string:string:float:float:float:float:float
               druid.datasource wikipedia
-              druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":false,"intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"],"filter":{"type":"selector","dimension":"language","value":"en"},"dimensions":[],"metrics":[],"granularity":"all","pagingSpec":{"threshold":16384,"fromNext":true},"context":{"druid.query.fetch":false}}
-              druid.query.type select
+              druid.fieldNames robot
+              druid.fieldTypes string
+              druid.query.json {"queryType":"scan","dataSource":"wikipedia","intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"],"filter":{"type":"selector","dimension":"language","value":"en"},"columns":["robot"],"resultFormat":"compactedList"}
+              druid.query.type scan
 #### A masked pattern was here ####
               name default.druid_table_1
               numFiles 0
@@ -453,8 +477,10 @@ STAGE PLANS:
                 columns.comments 'from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer'
                 columns.types timestamp with local time zone:string:string:string:string:string:string:string:string:float:float:float:float:float
                 druid.datasource wikipedia
-                druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":false,"intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"],"filter":{"type":"selector","dimension":"language","value":"en"},"dimensions":["robot"],"metrics":[],"granularity":"all","pagingSpec":{"threshold":16384,"fromNext":true},"context":{"druid.query.fetch":false}}
-                druid.query.type select
+                druid.fieldNames vc
+                druid.fieldTypes string
+                druid.query.json {"queryType":"scan","dataSource":"wikipedia","intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"],"filter":{"type":"selector","dimension":"language","value":"en"},"virtualColumns":[{"type":"expression","name":"vc","expression":"'en'","outputType":"STRING"}],"columns":["vc"],"resultFormat":"compactedList"}
+                druid.query.type scan
 #### A masked pattern was here ####
                 name default.druid_table_1
                 numFiles 0
@@ -479,10 +505,10 @@ STAGE PLANS:
           keys:
             0 
             1 
-          outputColumnNames: _col1
+          outputColumnNames: _col0
           Statistics: Num rows: 1 Data size: 1 Basic stats: PARTIAL Column stats: NONE
           Select Operator
-            expressions: _col1 (type: string), 'en' (type: string)
+            expressions: _col0 (type: string), 'en' (type: string)
             outputColumnNames: _col0, _col1
             Statistics: Num rows: 1 Data size: 1 Basic stats: PARTIAL Column stats: NONE
             File Output Operator
@@ -529,132 +555,27 @@ ORDER BY CAST(robot AS INTEGER) ASC, m DESC
 LIMIT 10
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
+  Stage-0 is a root stage
 
 STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: druid_table_1
-            properties:
-              druid.query.json {"queryType":"groupBy","dataSource":"wikipedia","granularity":"all","dimensions":[{"type":"default","dimension":"robot"},{"type":"default","dimension":"language"},{"type":"extraction","dimension":"__time","outputName":"floor_day","extractionFn":{"type":"timeFormat","format":"yyyy-MM-dd'T'HH:mm:ss.SSS'Z'","granularity":"day","timeZone":"US/Pacific","locale":"en-US"}}],"limitSpec":{"type":"default"},"aggregations":[{"type":"doubleMax","name":"$f3","fieldName":"added"},{"type":"doubleSum","name":"$f4","fieldName":"delta"}],"intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"]}
-              druid.query.type groupBy
-            Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
-            GatherStats: false
-            Select Operator
-              expressions: robot (type: string), floor_day (type: timestamp with local time zone), $f3 (type: float), $f4 (type: float), UDFToInteger(robot) (type: int)
-              outputColumnNames: _col0, _col1, _col2, _col3, _col4
-              Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
-              Reduce Output Operator
-                key expressions: _col4 (type: int), _col2 (type: float)
-                null sort order: az
-                sort order: +-
-                Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
-                tag: -1
-                TopN: 10
-                TopN Hash Memory Usage: 0.1
-                value expressions: _col0 (type: string), _col1 (type: timestamp with local time zone), _col3 (type: float)
-                auto parallelism: false
-      Path -> Alias:
-#### A masked pattern was here ####
-      Path -> Partition:
-#### A masked pattern was here ####
-          Partition
-            base file name: druid_table_1
-            input format: org.apache.hadoop.hive.druid.io.DruidQueryBasedInputFormat
-            output format: org.apache.hadoop.hive.druid.io.DruidOutputFormat
-            properties:
-              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"__time":"true","added":"true","anonymous":"true","count":"true","deleted":"true","delta":"true","language":"true","namespace":"true","newpage":"true","page":"true","robot":"true","unpatrolled":"true","user":"true","variation":"true"}}
-              EXTERNAL TRUE
-              bucket_count -1
-              column.name.delimiter ,
-              columns __time,robot,namespace,anonymous,unpatrolled,page,language,newpage,user,count,added,delta,variation,deleted
-              columns.comments 'from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer'
-              columns.types timestamp with local time zone:string:string:string:string:string:string:string:string:float:float:float:float:float
-              druid.datasource wikipedia
-              druid.query.json {"queryType":"groupBy","dataSource":"wikipedia","granularity":"all","dimensions":[{"type":"default","dimension":"robot"},{"type":"default","dimension":"language"},{"type":"extraction","dimension":"__time","outputName":"floor_day","extractionFn":{"type":"timeFormat","format":"yyyy-MM-dd'T'HH:mm:ss.SSS'Z'","granularity":"day","timeZone":"US/Pacific","locale":"en-US"}}],"limitSpec":{"type":"default"},"aggregations":[{"type":"doubleMax","name":"$f3","fieldName":"added"},{"type":"doubleSum","name":"$f4","fieldName":"delta"}],"intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"]}
-              druid.query.type groupBy
-#### A masked pattern was here ####
-              name default.druid_table_1
-              numFiles 0
-              numRows 0
-              rawDataSize 0
-              serialization.ddl struct druid_table_1 { timestamp with local time zone __time, string robot, string namespace, string anonymous, string unpatrolled, string page, string language, string newpage, string user, float count, float added, float delta, float variation, float deleted}
-              serialization.format 1
-              serialization.lib org.apache.hadoop.hive.druid.QTestDruidSerDe
-              storage_handler org.apache.hadoop.hive.druid.QTestDruidStorageHandler
-              totalSize 0
-#### A masked pattern was here ####
-            serde: org.apache.hadoop.hive.druid.QTestDruidSerDe
-          
-              input format: org.apache.hadoop.hive.druid.io.DruidQueryBasedInputFormat
-              output format: org.apache.hadoop.hive.druid.io.DruidOutputFormat
-              properties:
-                COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"__time":"true","added":"true","anonymous":"true","count":"true","deleted":"true","delta":"true","language":"true","namespace":"true","newpage":"true","page":"true","robot":"true","unpatrolled":"true","user":"true","variation":"true"}}
-                EXTERNAL TRUE
-                bucket_count -1
-                column.name.delimiter ,
-                columns __time,robot,namespace,anonymous,unpatrolled,page,language,newpage,user,count,added,delta,variation,deleted
-                columns.comments 'from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer'
-                columns.types timestamp with local time zone:string:string:string:string:string:string:string:string:float:float:float:float:float
-                druid.datasource wikipedia
-                druid.query.json {"queryType":"groupBy","dataSource":"wikipedia","granularity":"all","dimensions":[{"type":"default","dimension":"robot"},{"type":"default","dimension":"language"},{"type":"extraction","dimension":"__time","outputName":"floor_day","extractionFn":{"type":"timeFormat","format":"yyyy-MM-dd'T'HH:mm:ss.SSS'Z'","granularity":"day","timeZone":"US/Pacific","locale":"en-US"}}],"limitSpec":{"type":"default"},"aggregations":[{"type":"doubleMax","name":"$f3","fieldName":"added"},{"type":"doubleSum","name":"$f4","fieldName":"delta"}],"intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"]}
-                druid.query.type groupBy
-#### A masked pattern was here ####
-                name default.druid_table_1
-                numFiles 0
-                numRows 0
-                rawDataSize 0
-                serialization.ddl struct druid_table_1 { timestamp with local time zone __time, string robot, string namespace, string anonymous, string unpatrolled, string page, string language, string newpage, string user, float count, float added, float delta, float variation, float deleted}
-                serialization.format 1
-                serialization.lib org.apache.hadoop.hive.druid.QTestDruidSerDe
-                storage_handler org.apache.hadoop.hive.druid.QTestDruidStorageHandler
-                totalSize 0
-#### A masked pattern was here ####
-              serde: org.apache.hadoop.hive.druid.QTestDruidSerDe
-              name: default.druid_table_1
-            name: default.druid_table_1
-      Truncated Path -> Alias:
-        /druid_table_1 [$hdt$_0:druid_table_1]
-      Needs Tagging: false
-      Reduce Operator Tree:
-        Select Operator
-          expressions: VALUE._col0 (type: string), VALUE._col1 (type: timestamp with local time zone), KEY.reducesinkkey1 (type: float), VALUE._col2 (type: float)
-          outputColumnNames: _col0, _col1, _col2, _col3
-          Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
-          Limit
-            Number of rows: 10
-            Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
-            File Output Operator
-              compressed: false
-              GlobalTableId: 0
-#### A masked pattern was here ####
-              NumFilesPerFileSink: 1
-              Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
-#### A masked pattern was here ####
-              table:
-                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                  properties:
-                    columns _col0,_col1,_col2,_col3
-                    columns.types string:timestamp with local time zone:float:float
-                    escape.delim \
-                    hive.serialization.extend.additional.nesting.levels true
-                    serialization.escape.crlf true
-                    serialization.format 1
-                    serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              TotalFiles: 1
-              GatherStats: false
-              MultiFileSpray: false
-
   Stage: Stage-0
     Fetch Operator
       limit: -1
       Processor Tree:
-        ListSink
+        TableScan
+          alias: druid_table_1
+          properties:
+            druid.fieldNames robot,floor_day,$f3,$f4,(tok_function tok_int (tok_table_or_col robot))
+            druid.fieldTypes string,timestamp with local time zone,float,double,int
+            druid.query.json {"queryType":"groupBy","dataSource":"wikipedia","granularity":"all","dimensions":[{"type":"default","dimension":"robot","outputName":"robot","outputType":"STRING"},{"type":"default","dimension":"language","outputName":"language","outputType":"STRING"},{"type":"extraction","dimension":"__time","outputName":"floor_day","extractionFn":{"type":"timeFormat","format":"yyyy-MM-dd'T'HH:mm:ss.SSS'Z'","granularity":{"type":"period","period":"P1D","timeZone":"US/Pacific"},"timeZone":"US/Pacific","locale":"und"}}],"limitSpec":{"type":"default","limit":10,"columns":[{"dimension":"(tok_function tok_int (tok_table_or_col robot))","direction":"ascending","dimensionOrder":"numeric"},{"dimension":"$f3","direction":"descending","dimensionOrder":"numeric"}]},"aggregations":[{"type":"doubleMax","name":"$f3","fieldName":"added"},{"type":"doubleSum","name":"$f4","fieldName":"delta"}],"postAggregations":[{"type":"expression","name":"(tok_function tok_int (tok_table_or_col robot))","expression":"CAST(\"robot\", 'LONG')"}],"intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"]}
+            druid.query.type groupBy
+          Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+          GatherStats: false
+          Select Operator
+            expressions: robot (type: string), floor_day (type: timestamp with local time zone), $f3 (type: float), $f4 (type: double)
+            outputColumnNames: _col0, _col1, _col2, _col3
+            Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+            ListSink
 
 PREHOOK: query: EXPLAIN
 SELECT substring(namespace, CAST(deleted AS INT), 4)
@@ -665,36 +586,26 @@ SELECT substring(namespace, CAST(deleted AS INT), 4)
 FROM druid_table_1
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
+  Stage-0 is a root stage
 
 STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: druid_table_1
-            properties:
-              druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":false,"intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"],"dimensions":["namespace"],"metrics":["deleted"],"granularity":"all","pagingSpec":{"threshold":16384,"fromNext":true},"context":{"druid.query.fetch":false}}
-              druid.query.type select
-            Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
-            Select Operator
-              expressions: substring(namespace, UDFToInteger(deleted), 4) (type: string)
-              outputColumnNames: _col0
-              Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
-              File Output Operator
-                compressed: false
-                Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
-                table:
-                    input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                    output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
   Stage: Stage-0
     Fetch Operator
       limit: -1
       Processor Tree:
-        ListSink
+        TableScan
+          alias: druid_table_1
+          properties:
+            druid.fieldNames vc
+            druid.fieldTypes string
+            druid.query.json {"queryType":"scan","dataSource":"wikipedia","intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"],"virtualColumns":[{"type":"expression","name":"vc","expression":"substring(\"namespace\", (CAST(\"deleted\", 'LONG') - 1), 4)","outputType":"STRING"}],"columns":["vc"],"resultFormat":"compactedList"}
+            druid.query.type scan
+          Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+          Select Operator
+            expressions: vc (type: string)
+            outputColumnNames: _col0
+            Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+            ListSink
 
 PREHOOK: query: EXPLAIN
 SELECT robot, floor_day(`__time`)
@@ -713,83 +624,26 @@ ORDER BY robot
 LIMIT 10
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-2 depends on stages: Stage-1
-  Stage-0 depends on stages: Stage-2
+  Stage-0 is a root stage
 
 STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: druid_table_1
-            filterExpr: floor_day(__time) BETWEEN TIMESTAMPLOCALTZ'1999-11-01 00:00:00.0 US/Pacific' AND TIMESTAMPLOCALTZ'1999-11-10 00:00:00.0 US/Pacific' (type: boolean)
-            properties:
-              druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":false,"intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"],"dimensions":["robot"],"metrics":[],"granularity":"all","pagingSpec":{"threshold":16384,"fromNext":true},"context":{"druid.query.fetch":false}}
-              druid.query.type select
-            Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
-            Filter Operator
-              predicate: floor_day(__time) BETWEEN TIMESTAMPLOCALTZ'1999-11-01 00:00:00.0 US/Pacific' AND TIMESTAMPLOCALTZ'1999-11-10 00:00:00.0 US/Pacific' (type: boolean)
-              Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
-              Select Operator
-                expressions: robot (type: string), floor_day(__time) (type: timestamp with local time zone)
-                outputColumnNames: _col0, _col1
-                Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
-                Group By Operator
-                  keys: _col0 (type: string), _col1 (type: timestamp with local time zone)
-                  mode: hash
-                  outputColumnNames: _col0, _col1
-                  Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
-                  Reduce Output Operator
-                    key expressions: _col0 (type: string), _col1 (type: timestamp with local time zone)
-                    sort order: ++
-                    Map-reduce partition columns: _col0 (type: string), _col1 (type: timestamp with local time zone)
-                    Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
-                    TopN Hash Memory Usage: 0.1
-      Reduce Operator Tree:
-        Group By Operator
-          keys: KEY._col0 (type: string), KEY._col1 (type: timestamp with local time zone)
-          mode: mergepartial
-          outputColumnNames: _col0, _col1
-          Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
-          File Output Operator
-            compressed: false
-            table:
-                input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-
-  Stage: Stage-2
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            Reduce Output Operator
-              key expressions: _col0 (type: string)
-              sort order: +
-              Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
-              TopN Hash Memory Usage: 0.1
-              value expressions: _col1 (type: timestamp with local time zone)
-      Reduce Operator Tree:
-        Select Operator
-          expressions: KEY.reducesinkkey0 (type: string), VALUE._col0 (type: timestamp with local time zone)
-          outputColumnNames: _col0, _col1
-          Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
-          Limit
-            Number of rows: 10
-            Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
-            File Output Operator
-              compressed: false
-              Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
-              table:
-                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
   Stage: Stage-0
     Fetch Operator
-      limit: 10
+      limit: -1
       Processor Tree:
-        ListSink
+        TableScan
+          alias: druid_table_1
+          properties:
+            druid.fieldNames robot,floor_day
+            druid.fieldTypes string,timestamp with local time zone
+            druid.query.json {"queryType":"groupBy","dataSource":"wikipedia","granularity":"all","dimensions":[{"type":"default","dimension":"robot","outputName":"robot","outputType":"STRING"},{"type":"extraction","dimension":"__time","outputName":"floor_day","extractionFn":{"type":"timeFormat","format":"yyyy-MM-dd'T'HH:mm:ss.SSS'Z'","granularity":{"type":"period","period":"P1D","timeZone":"US/Pacific"},"timeZone":"US/Pacific","locale":"und"}}],"limitSpec":{"type":"default","limit":10,"columns":[{"dimension":"robot","direction":"ascending","dimensionOrder":"lexicographic"}]},"aggregations":[],"intervals":["1999-11-01T08:00:00.000Z/1999-11-10T08:00:00.001Z"]}
+            druid.query.type groupBy
+          Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+          Select Operator
+            expressions: robot (type: string), floor_day (type: timestamp with local time zone)
+            outputColumnNames: _col0, _col1
+            Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+            ListSink
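
(Annotation: the entire two-stage MapReduce plan removed above collapses into a single Druid groupBy. floor_day(`__time`) becomes a timeFormat extraction with a P1D period granularity, the BETWEEN predicate is folded into the query interval, and ORDER BY robot LIMIT 10 becomes the limitSpec. The full test query is of roughly this shape; the WHERE and GROUP BY clauses are hidden by the diff context and reconstructed here from the removed filterExpr, so treat them as an approximation:

EXPLAIN
SELECT robot, floor_day(`__time`)
FROM druid_table_1
WHERE floor_day(`__time`) BETWEEN CAST('1999-11-01 00:00:00' AS TIMESTAMP WITH LOCAL TIME ZONE)
                              AND CAST('1999-11-10 00:00:00' AS TIMESTAMP WITH LOCAL TIME ZONE)
GROUP BY robot, floor_day(`__time`)
ORDER BY robot
LIMIT 10
)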
 
 PREHOOK: query: EXPLAIN
 SELECT robot, `__time`
@@ -808,54 +662,26 @@ ORDER BY robot
 LIMIT 10
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
+  Stage-0 is a root stage
 
 STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: druid_table_1
-            filterExpr: floor_day(extract) BETWEEN TIMESTAMPLOCALTZ'1999-11-01 00:00:00.0 US/Pacific' AND TIMESTAMPLOCALTZ'1999-11-10 00:00:00.0 US/Pacific' (type: boolean)
-            properties:
-              druid.query.json {"queryType":"groupBy","dataSource":"wikipedia","granularity":"all","dimensions":[{"type":"extraction","dimension":"__time","outputName":"extract","extractionFn":{"type":"timeFormat","format":"yyyy-MM-dd'T'HH:mm:ss.SSS'Z'","timeZone":"US/Pacific"}},{"type":"default","dimension":"robot"}],"limitSpec":{"type":"default"},"aggregations":[{"type":"longSum","name":"dummy_agg","fieldName":"dummy_agg"}],"intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"]}
-              druid.query.type groupBy
-            Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
-            Filter Operator
-              predicate: floor_day(extract) BETWEEN TIMESTAMPLOCALTZ'1999-11-01 00:00:00.0 US/Pacific' AND TIMESTAMPLOCALTZ'1999-11-10 00:00:00.0 US/Pacific' (type: boolean)
-              Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
-              Select Operator
-                expressions: robot (type: string), extract (type: timestamp with local time zone)
-                outputColumnNames: _col0, _col1
-                Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
-                Reduce Output Operator
-                  key expressions: _col0 (type: string)
-                  sort order: +
-                  Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
-                  TopN Hash Memory Usage: 0.1
-                  value expressions: _col1 (type: timestamp with local time zone)
-      Reduce Operator Tree:
-        Select Operator
-          expressions: KEY.reducesinkkey0 (type: string), VALUE._col0 (type: timestamp with local time zone)
-          outputColumnNames: _col0, _col1
-          Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
-          Limit
-            Number of rows: 10
-            Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
-            File Output Operator
-              compressed: false
-              Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
-              table:
-                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
   Stage: Stage-0
     Fetch Operator
-      limit: 10
+      limit: -1
       Processor Tree:
-        ListSink
+        TableScan
+          alias: druid_table_1
+          properties:
+            druid.fieldNames extract,robot
+            druid.fieldTypes timestamp with local time zone,string
+            druid.query.json {"queryType":"groupBy","dataSource":"wikipedia","granularity":"all","dimensions":[{"type":"extraction","dimension":"__time","outputName":"extract","extractionFn":{"type":"timeFormat","format":"yyyy-MM-dd'T'HH:mm:ss.SSS'Z'","timeZone":"US/Pacific"}},{"type":"default","dimension":"robot","outputName":"robot","outputType":"STRING"}],"limitSpec":{"type":"default","limit":10,"columns":[{"dimension":"robot","direction":"ascending","dimensionOrder":"lexicographic"}]},"aggregations":[],"intervals":["1999-11-01T08:00:00.000Z/1999-11-10T08:00:00.001Z"]}
+            druid.query.type groupBy
+          Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+          Select Operator
+            expressions: robot (type: string), extract (type: timestamp with local time zone)
+            outputColumnNames: _col0, _col1
+            Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+            ListSink
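
(Annotation: the same rewrite applies when grouping on the raw `__time` column. Here the timeFormat extraction carries no granularity, so each distinct timestamp forms its own group, and the placeholder longSum over dummy_agg from the old expectation is gone: the planner now emits an empty "aggregations":[] list. The interval endpoints follow from converting the local-time BETWEEN bounds to UTC; US/Pacific is PST (UTC-08:00) in November 1999, and the inclusive upper bound is widened by one millisecond into a half-open interval:

1999-11-01 00:00:00 US/Pacific  ->  1999-11-01T08:00:00.000Z   (interval start)
1999-11-10 00:00:00 US/Pacific  ->  1999-11-10T08:00:00.000Z   (inclusive upper bound)
                                ->  1999-11-10T08:00:00.001Z   (half-open interval end)
)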
 
 PREHOOK: query: EXPLAIN
 SELECT robot, floor_day(`__time`)
@@ -884,7 +710,9 @@ STAGE PLANS:
         TableScan
           alias: druid_table_1
           properties:
-            druid.query.json {"queryType":"groupBy","dataSource":"wikipedia","granularity":"all","dimensions":[{"type":"default","dimension":"robot"},{"type":"extraction","dimension":"__time","outputName":"floor_day","extractionFn":{"type":"timeFormat","format":"yyyy-MM-dd'T'HH:mm:ss.SSS'Z'","granularity":"day","timeZone":"US/Pacific","locale":"en-US"}}],"limitSpec":{"type":"default","limit":10,"columns":[{"dimension":"robot","direction":"ascending","dimensionOrder":"alphanumeric"}]},"aggregations":[{"type":"longSum","name":"dummy_agg","fieldName":"dummy_agg"}],"intervals":["1999-11-01T08:00:00.000Z/1999-11-10T08:00:00.001Z"]}
+            druid.fieldNames robot,floor_day
+            druid.fieldTypes string,timestamp with local time zone
+            druid.query.json {"queryType":"groupBy","dataSource":"wikipedia","granularity":"all","dimensions":[{"type":"default","dimension":"robot","outputName":"robot","outputType":"STRING"},{"type":"extraction","dimension":"__time","outputName":"floor_day","extractionFn":{"type":"timeFormat","format":"yyyy-MM-dd'T'HH:mm:ss.SSS'Z'","granularity":{"type":"period","period":"P1D","timeZone":"US/Pacific"},"timeZone":"US/Pacific","locale":"und"}}],"limitSpec":{"type":"default","limit":10,"columns":[{"dimension":"robot","direction":"ascending","dimensionOrder":"lexicographic"}]},"aggregations":[],"intervals":["1999-11-01T08:00:00.000Z/1999-11-10T08:00:00.001Z"]}
             druid.query.type groupBy
           Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
           Select Operator