Posted to commits@hive.apache.org by kg...@apache.org on 2019/01/23 16:38:58 UTC

[26/51] [partial] hive git commit: HIVE-17084: Turn on hive.stats.fetch.column.stats configuration flag (Zoltan Haindrich reviewed by Ashutosh Chauhan)
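
For context, hive.stats.fetch.column.stats controls whether the planner fetches per-column statistics from the metastore when annotating operators with row counts and data sizes; enabling it is what shifts the "Statistics:" lines (Data size, Column stats) in the .q.out golden files updated below. A minimal HiveQL sketch of exercising the flag (the table name t is hypothetical, not from this patch):

    set hive.stats.fetch.column.stats=true;
    analyze table t compute statistics for columns;
    explain select count(*) from t;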

http://git-wip-us.apache.org/repos/asf/hive/blob/268a6e5a/ql/src/test/results/clientpositive/distinct_windowing_no_cbo.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/distinct_windowing_no_cbo.q.out b/ql/src/test/results/clientpositive/distinct_windowing_no_cbo.q.out
index 1e0de3c..c495c29 100644
--- a/ql/src/test/results/clientpositive/distinct_windowing_no_cbo.q.out
+++ b/ql/src/test/results/clientpositive/distinct_windowing_no_cbo.q.out
@@ -65,19 +65,19 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: over10k_n14
-            Statistics: Num rows: 1 Data size: 10175440 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 1 Data size: 12 Basic stats: COMPLETE Column stats: NONE
             Reduce Output Operator
               key expressions: si (type: smallint), i (type: int)
               sort order: ++
               Map-reduce partition columns: si (type: smallint)
-              Statistics: Num rows: 1 Data size: 10175440 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 1 Data size: 12 Basic stats: COMPLETE Column stats: NONE
               value expressions: t (type: tinyint)
       Execution mode: vectorized
       Reduce Operator Tree:
         Select Operator
           expressions: VALUE._col0 (type: tinyint), KEY.reducesinkkey0 (type: smallint), KEY.reducesinkkey1 (type: int)
           outputColumnNames: _col0, _col1, _col2
-          Statistics: Num rows: 1 Data size: 10175440 Basic stats: COMPLETE Column stats: NONE
+          Statistics: Num rows: 1 Data size: 12 Basic stats: COMPLETE Column stats: NONE
           PTF Operator
             Function definitions:
                 Input definition
@@ -97,16 +97,16 @@ STAGE PLANS:
                         name: first_value
                         window function: GenericUDAFFirstValueEvaluator
                         window frame: RANGE PRECEDING(MAX)~CURRENT
-            Statistics: Num rows: 1 Data size: 10175440 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 1 Data size: 12 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: first_value_window_0 (type: tinyint)
               outputColumnNames: first_value_window_0
-              Statistics: Num rows: 1 Data size: 10175440 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 1 Data size: 12 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
                 keys: first_value_window_0 (type: tinyint)
                 mode: hash
                 outputColumnNames: _col0
-                Statistics: Num rows: 1 Data size: 10175440 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 1 Data size: 12 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
                   table:
@@ -122,7 +122,7 @@ STAGE PLANS:
               key expressions: _col0 (type: tinyint)
               sort order: +
               Map-reduce partition columns: _col0 (type: tinyint)
-              Statistics: Num rows: 1 Data size: 10175440 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 1 Data size: 12 Basic stats: COMPLETE Column stats: NONE
               TopN Hash Memory Usage: 0.1
       Execution mode: vectorized
       Reduce Operator Tree:
@@ -130,13 +130,13 @@ STAGE PLANS:
           keys: KEY._col0 (type: tinyint)
           mode: mergepartial
           outputColumnNames: _col0
-          Statistics: Num rows: 1 Data size: 10175440 Basic stats: COMPLETE Column stats: NONE
+          Statistics: Num rows: 1 Data size: 12 Basic stats: COMPLETE Column stats: NONE
           Limit
             Number of rows: 10
-            Statistics: Num rows: 1 Data size: 10175440 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 1 Data size: 12 Basic stats: COMPLETE Column stats: NONE
             File Output Operator
               compressed: false
-              Statistics: Num rows: 1 Data size: 10175440 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 1 Data size: 12 Basic stats: COMPLETE Column stats: NONE
               table:
                   input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@@ -189,18 +189,18 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: over10k_n14
-            Statistics: Num rows: 1 Data size: 10175440 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
             Reduce Output Operator
               key expressions: si (type: smallint), i (type: int)
               sort order: ++
               Map-reduce partition columns: si (type: smallint)
-              Statistics: Num rows: 1 Data size: 10175440 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
       Execution mode: vectorized
       Reduce Operator Tree:
         Select Operator
           expressions: KEY.reducesinkkey0 (type: smallint), KEY.reducesinkkey1 (type: int)
           outputColumnNames: _col1, _col2
-          Statistics: Num rows: 1 Data size: 10175440 Basic stats: COMPLETE Column stats: NONE
+          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
           PTF Operator
             Function definitions:
                 Input definition
@@ -220,16 +220,16 @@ STAGE PLANS:
                         name: last_value
                         window function: GenericUDAFLastValueEvaluator
                         window frame: RANGE PRECEDING(MAX)~CURRENT
-            Statistics: Num rows: 1 Data size: 10175440 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: last_value_window_0 (type: int)
               outputColumnNames: last_value_window_0
-              Statistics: Num rows: 1 Data size: 10175440 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
                 keys: last_value_window_0 (type: int)
                 mode: hash
                 outputColumnNames: _col0
-                Statistics: Num rows: 1 Data size: 10175440 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
                   table:
@@ -245,7 +245,7 @@ STAGE PLANS:
               key expressions: _col0 (type: int)
               sort order: +
               Map-reduce partition columns: _col0 (type: int)
-              Statistics: Num rows: 1 Data size: 10175440 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
               TopN Hash Memory Usage: 0.1
       Execution mode: vectorized
       Reduce Operator Tree:
@@ -253,13 +253,13 @@ STAGE PLANS:
           keys: KEY._col0 (type: int)
           mode: mergepartial
           outputColumnNames: _col0
-          Statistics: Num rows: 1 Data size: 10175440 Basic stats: COMPLETE Column stats: NONE
+          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
           Limit
             Number of rows: 10
-            Statistics: Num rows: 1 Data size: 10175440 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
             File Output Operator
               compressed: false
-              Statistics: Num rows: 1 Data size: 10175440 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
               table:
                   input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@@ -316,19 +316,19 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: over10k_n14
-            Statistics: Num rows: 1 Data size: 10175440 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 1 Data size: 12 Basic stats: COMPLETE Column stats: NONE
             Reduce Output Operator
               key expressions: si (type: smallint), i (type: int)
               sort order: ++
               Map-reduce partition columns: si (type: smallint)
-              Statistics: Num rows: 1 Data size: 10175440 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 1 Data size: 12 Basic stats: COMPLETE Column stats: NONE
               value expressions: t (type: tinyint)
       Execution mode: vectorized
       Reduce Operator Tree:
         Select Operator
           expressions: VALUE._col0 (type: tinyint), KEY.reducesinkkey0 (type: smallint), KEY.reducesinkkey1 (type: int)
           outputColumnNames: _col0, _col1, _col2
-          Statistics: Num rows: 1 Data size: 10175440 Basic stats: COMPLETE Column stats: NONE
+          Statistics: Num rows: 1 Data size: 12 Basic stats: COMPLETE Column stats: NONE
           PTF Operator
             Function definitions:
                 Input definition
@@ -354,16 +354,16 @@ STAGE PLANS:
                         name: first_value
                         window function: GenericUDAFFirstValueEvaluator
                         window frame: RANGE PRECEDING(MAX)~CURRENT
-            Statistics: Num rows: 1 Data size: 10175440 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 1 Data size: 12 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: last_value_window_0 (type: int), first_value_window_1 (type: tinyint)
               outputColumnNames: last_value_window_0, first_value_window_1
-              Statistics: Num rows: 1 Data size: 10175440 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 1 Data size: 12 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
                 keys: last_value_window_0 (type: int), first_value_window_1 (type: tinyint)
                 mode: hash
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 1 Data size: 10175440 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 1 Data size: 12 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
                   table:
@@ -379,7 +379,7 @@ STAGE PLANS:
               key expressions: _col0 (type: int), _col1 (type: tinyint)
               sort order: ++
               Map-reduce partition columns: _col0 (type: int), _col1 (type: tinyint)
-              Statistics: Num rows: 1 Data size: 10175440 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 1 Data size: 12 Basic stats: COMPLETE Column stats: NONE
               TopN Hash Memory Usage: 0.1
       Execution mode: vectorized
       Reduce Operator Tree:
@@ -387,13 +387,13 @@ STAGE PLANS:
           keys: KEY._col0 (type: int), KEY._col1 (type: tinyint)
           mode: mergepartial
           outputColumnNames: _col0, _col1
-          Statistics: Num rows: 1 Data size: 10175440 Basic stats: COMPLETE Column stats: NONE
+          Statistics: Num rows: 1 Data size: 12 Basic stats: COMPLETE Column stats: NONE
           Limit
             Number of rows: 50
-            Statistics: Num rows: 1 Data size: 10175440 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 1 Data size: 12 Basic stats: COMPLETE Column stats: NONE
             File Output Operator
               compressed: false
-              Statistics: Num rows: 1 Data size: 10175440 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 1 Data size: 12 Basic stats: COMPLETE Column stats: NONE
               table:
                   input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@@ -496,22 +496,22 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: over10k_n14
-            Statistics: Num rows: 1 Data size: 10175440 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: si (type: smallint), f (type: float)
               outputColumnNames: si, f
-              Statistics: Num rows: 1 Data size: 10175440 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
                 aggregations: max(f)
                 keys: si (type: smallint)
                 mode: hash
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 1 Data size: 10175440 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
                   key expressions: _col0 (type: smallint)
                   sort order: +
                   Map-reduce partition columns: _col0 (type: smallint)
-                  Statistics: Num rows: 1 Data size: 10175440 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                   value expressions: _col1 (type: float)
       Execution mode: vectorized
       Reduce Operator Tree:
@@ -520,10 +520,10 @@ STAGE PLANS:
           keys: KEY._col0 (type: smallint)
           mode: mergepartial
           outputColumnNames: _col0, _col1
-          Statistics: Num rows: 1 Data size: 10175440 Basic stats: COMPLETE Column stats: NONE
+          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
           Filter Operator
             predicate: (_col1 > 0.0) (type: boolean)
-            Statistics: Num rows: 1 Data size: 10175440 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
             File Output Operator
               compressed: false
               table:
@@ -539,13 +539,13 @@ STAGE PLANS:
               key expressions: _col0 (type: smallint), _col1 (type: float)
               sort order: ++
               Map-reduce partition columns: _col0 (type: smallint)
-              Statistics: Num rows: 1 Data size: 10175440 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
       Execution mode: vectorized
       Reduce Operator Tree:
         Select Operator
           expressions: KEY.reducesinkkey0 (type: smallint), KEY.reducesinkkey1 (type: float)
           outputColumnNames: _col0, _col1
-          Statistics: Num rows: 1 Data size: 10175440 Basic stats: COMPLETE Column stats: NONE
+          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
           PTF Operator
             Function definitions:
                 Input definition
@@ -566,17 +566,17 @@ STAGE PLANS:
                         window function: GenericUDAFRankEvaluator
                         window frame: ROWS PRECEDING(MAX)~FOLLOWING(MAX)
                         isPivotResult: true
-            Statistics: Num rows: 1 Data size: 10175440 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: _col0 (type: smallint), _col1 (type: float), rank_window_0 (type: int)
               outputColumnNames: _col0, _col1, _col2
-              Statistics: Num rows: 1 Data size: 10175440 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
               Limit
                 Number of rows: 50
-                Statistics: Num rows: 1 Data size: 10175440 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
-                  Statistics: Num rows: 1 Data size: 10175440 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                   table:
                       input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                       output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@@ -679,18 +679,18 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: over10k_n14
-            Statistics: Num rows: 1 Data size: 10175440 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
             Reduce Output Operator
               key expressions: si (type: smallint), i (type: int)
               sort order: ++
               Map-reduce partition columns: si (type: smallint)
-              Statistics: Num rows: 1 Data size: 10175440 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
       Execution mode: vectorized
       Reduce Operator Tree:
         Select Operator
           expressions: KEY.reducesinkkey0 (type: smallint), KEY.reducesinkkey1 (type: int)
           outputColumnNames: _col1, _col2
-          Statistics: Num rows: 1 Data size: 10175440 Basic stats: COMPLETE Column stats: NONE
+          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
           PTF Operator
             Function definitions:
                 Input definition
@@ -711,16 +711,16 @@ STAGE PLANS:
                         window function: GenericUDAFRankEvaluator
                         window frame: ROWS PRECEDING(MAX)~FOLLOWING(MAX)
                         isPivotResult: true
-            Statistics: Num rows: 1 Data size: 10175440 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: rank_window_0 (type: int), _col1 (type: smallint)
               outputColumnNames: rank_window_0, _col1
-              Statistics: Num rows: 1 Data size: 10175440 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
                 keys: _col1 (type: smallint), rank_window_0 (type: int)
                 mode: hash
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 1 Data size: 10175440 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
                   table:
@@ -736,7 +736,7 @@ STAGE PLANS:
               key expressions: _col0 (type: smallint), _col1 (type: int)
               sort order: ++
               Map-reduce partition columns: _col0 (type: smallint), _col1 (type: int)
-              Statistics: Num rows: 1 Data size: 10175440 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
               TopN Hash Memory Usage: 0.1
       Execution mode: vectorized
       Reduce Operator Tree:
@@ -744,13 +744,13 @@ STAGE PLANS:
           keys: KEY._col0 (type: smallint), KEY._col1 (type: int)
           mode: mergepartial
           outputColumnNames: _col0, _col1
-          Statistics: Num rows: 1 Data size: 10175440 Basic stats: COMPLETE Column stats: NONE
+          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
           Limit
             Number of rows: 50
-            Statistics: Num rows: 1 Data size: 10175440 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
             File Output Operator
               compressed: false
-              Statistics: Num rows: 1 Data size: 10175440 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
               table:
                   input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat

http://git-wip-us.apache.org/repos/asf/hive/blob/268a6e5a/ql/src/test/results/clientpositive/druid_timeseries.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/druid_timeseries.q.out b/ql/src/test/results/clientpositive/druid_timeseries.q.out
index c0c12bc..fd69771 100644
--- a/ql/src/test/results/clientpositive/druid_timeseries.q.out
+++ b/ql/src/test/results/clientpositive/druid_timeseries.q.out
@@ -33,11 +33,11 @@ STAGE PLANS:
             druid.fieldTypes bigint
             druid.query.json {"queryType":"timeseries","dataSource":"wikipedia","descending":false,"granularity":"all","filter":{"type":"or","fields":[{"type":"and","fields":[{"type":"bound","dimension":"__time","lower":"2010-01-01T00:00:00.000Z","lowerStrict":false,"ordering":"lexicographic","extractionFn":{"type":"timeFormat","format":"yyyy-MM-dd'T'HH:mm:ss.SSS'Z'","timeZone":"UTC"}},{"type":"bound","dimension":"__time","upper":"2012-03-01T00:00:00.000Z","upperStrict":false,"ordering":"lexicographic","extractionFn":{"type":"timeFormat","format":"yyyy-MM-dd'T'HH:mm:ss.SSS'Z'","timeZone":"UTC"}}]},{"type":"bound","dimension":"added","upper":"0.0","upperStrict":false,"ordering":"numeric"}]},"aggregations":[{"type":"count","name":"$f0"}],"intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"],"context":{"skipEmptyBuckets":false}}
             druid.query.type timeseries
-          Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
           Select Operator
             expressions: $f0 (type: bigint)
             outputColumnNames: _col0
-            Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+            Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
             ListSink
 
 PREHOOK: query: EXPLAIN SELECT count(`__time`) from druid_table_1_n3 where `__time` <= '2010-01-01 00:00:00 UTC'
@@ -63,11 +63,11 @@ STAGE PLANS:
             druid.fieldTypes bigint
             druid.query.json {"queryType":"timeseries","dataSource":"wikipedia","descending":false,"granularity":"all","aggregations":[{"type":"count","name":"$f0"}],"intervals":["1900-01-01T00:00:00.000Z/2010-01-01T00:00:00.001Z"],"context":{"skipEmptyBuckets":false}}
             druid.query.type timeseries
-          Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
           Select Operator
             expressions: $f0 (type: bigint)
             outputColumnNames: _col0
-            Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+            Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
             ListSink
 
 PREHOOK: query: EXPLAIN
@@ -97,11 +97,11 @@ STAGE PLANS:
             druid.fieldTypes float,double
             druid.query.json {"queryType":"timeseries","dataSource":"wikipedia","descending":false,"granularity":"all","aggregations":[{"type":"doubleMax","name":"$f0","fieldName":"added"},{"type":"doubleSum","name":"$f1","fieldName":"variation"}],"intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"],"context":{"skipEmptyBuckets":false}}
             druid.query.type timeseries
-          Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+          Statistics: Num rows: 1 Data size: 12 Basic stats: COMPLETE Column stats: NONE
           Select Operator
             expressions: $f0 (type: float), $f1 (type: double)
             outputColumnNames: _col0, _col1
-            Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+            Statistics: Num rows: 1 Data size: 12 Basic stats: COMPLETE Column stats: NONE
             ListSink
 
 PREHOOK: query: EXPLAIN
@@ -133,11 +133,11 @@ STAGE PLANS:
             druid.fieldTypes timestamp with local time zone,float,double
             druid.query.json {"queryType":"groupBy","dataSource":"wikipedia","granularity":"all","dimensions":[{"type":"extraction","dimension":"__time","outputName":"extract","extractionFn":{"type":"timeFormat","format":"yyyy-MM-dd'T'HH:mm:ss.SSS'Z'","timeZone":"UTC"}}],"limitSpec":{"type":"default"},"aggregations":[{"type":"doubleMax","name":"$f1","fieldName":"added"},{"type":"doubleSum","name":"$f2","fieldName":"variation"}],"intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"]}
             druid.query.type groupBy
-          Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+          Statistics: Num rows: 1 Data size: 52 Basic stats: COMPLETE Column stats: NONE
           Select Operator
             expressions: extract (type: timestamp with local time zone), $f1 (type: float), $f2 (type: double)
             outputColumnNames: _col0, _col1, _col2
-            Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+            Statistics: Num rows: 1 Data size: 52 Basic stats: COMPLETE Column stats: NONE
             ListSink
 
 PREHOOK: query: EXPLAIN
@@ -169,11 +169,11 @@ STAGE PLANS:
             druid.fieldTypes timestamp with local time zone,float,double
             druid.query.json {"queryType":"timeseries","dataSource":"wikipedia","descending":false,"granularity":{"type":"period","period":"P1Y","timeZone":"US/Pacific"},"aggregations":[{"type":"doubleMax","name":"$f1","fieldName":"added"},{"type":"doubleSum","name":"$f2","fieldName":"variation"}],"intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"],"context":{"skipEmptyBuckets":true}}
             druid.query.type timeseries
-          Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+          Statistics: Num rows: 1 Data size: 52 Basic stats: COMPLETE Column stats: NONE
           Select Operator
             expressions: timestamp (type: timestamp with local time zone), $f1 (type: float), $f2 (type: double)
             outputColumnNames: _col0, _col1, _col2
-            Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+            Statistics: Num rows: 1 Data size: 52 Basic stats: COMPLETE Column stats: NONE
             ListSink
 
 PREHOOK: query: EXPLAIN
@@ -205,11 +205,11 @@ STAGE PLANS:
             druid.fieldTypes timestamp with local time zone,float,double
             druid.query.json {"queryType":"timeseries","dataSource":"wikipedia","descending":false,"granularity":{"type":"period","period":"P3M","timeZone":"US/Pacific"},"aggregations":[{"type":"doubleMax","name":"$f1","fieldName":"added"},{"type":"doubleSum","name":"$f2","fieldName":"variation"}],"intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"],"context":{"skipEmptyBuckets":true}}
             druid.query.type timeseries
-          Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+          Statistics: Num rows: 1 Data size: 52 Basic stats: COMPLETE Column stats: NONE
           Select Operator
             expressions: timestamp (type: timestamp with local time zone), $f1 (type: float), $f2 (type: double)
             outputColumnNames: _col0, _col1, _col2
-            Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+            Statistics: Num rows: 1 Data size: 52 Basic stats: COMPLETE Column stats: NONE
             ListSink
 
 PREHOOK: query: EXPLAIN
@@ -241,11 +241,11 @@ STAGE PLANS:
             druid.fieldTypes timestamp with local time zone,float,double
             druid.query.json {"queryType":"timeseries","dataSource":"wikipedia","descending":false,"granularity":{"type":"period","period":"P1M","timeZone":"US/Pacific"},"aggregations":[{"type":"doubleMax","name":"$f1","fieldName":"added"},{"type":"doubleSum","name":"$f2","fieldName":"variation"}],"intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"],"context":{"skipEmptyBuckets":true}}
             druid.query.type timeseries
-          Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+          Statistics: Num rows: 1 Data size: 52 Basic stats: COMPLETE Column stats: NONE
           Select Operator
             expressions: timestamp (type: timestamp with local time zone), $f1 (type: float), $f2 (type: double)
             outputColumnNames: _col0, _col1, _col2
-            Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+            Statistics: Num rows: 1 Data size: 52 Basic stats: COMPLETE Column stats: NONE
             ListSink
 
 PREHOOK: query: EXPLAIN
@@ -277,11 +277,11 @@ STAGE PLANS:
             druid.fieldTypes timestamp with local time zone,float,double
             druid.query.json {"queryType":"timeseries","dataSource":"wikipedia","descending":false,"granularity":{"type":"period","period":"P1W","timeZone":"US/Pacific"},"aggregations":[{"type":"doubleMax","name":"$f1","fieldName":"added"},{"type":"doubleSum","name":"$f2","fieldName":"variation"}],"intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"],"context":{"skipEmptyBuckets":true}}
             druid.query.type timeseries
-          Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+          Statistics: Num rows: 1 Data size: 52 Basic stats: COMPLETE Column stats: NONE
           Select Operator
             expressions: timestamp (type: timestamp with local time zone), $f1 (type: float), $f2 (type: double)
             outputColumnNames: _col0, _col1, _col2
-            Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+            Statistics: Num rows: 1 Data size: 52 Basic stats: COMPLETE Column stats: NONE
             ListSink
 
 PREHOOK: query: EXPLAIN
@@ -313,11 +313,11 @@ STAGE PLANS:
             druid.fieldTypes timestamp with local time zone,float,double
             druid.query.json {"queryType":"timeseries","dataSource":"wikipedia","descending":false,"granularity":{"type":"period","period":"P1D","timeZone":"US/Pacific"},"aggregations":[{"type":"doubleMax","name":"$f1","fieldName":"added"},{"type":"doubleSum","name":"$f2","fieldName":"variation"}],"intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"],"context":{"skipEmptyBuckets":true}}
             druid.query.type timeseries
-          Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+          Statistics: Num rows: 1 Data size: 52 Basic stats: COMPLETE Column stats: NONE
           Select Operator
             expressions: timestamp (type: timestamp with local time zone), $f1 (type: float), $f2 (type: double)
             outputColumnNames: _col0, _col1, _col2
-            Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+            Statistics: Num rows: 1 Data size: 52 Basic stats: COMPLETE Column stats: NONE
             ListSink
 
 PREHOOK: query: EXPLAIN
@@ -349,11 +349,11 @@ STAGE PLANS:
             druid.fieldTypes timestamp with local time zone,float,double
             druid.query.json {"queryType":"timeseries","dataSource":"wikipedia","descending":false,"granularity":{"type":"period","period":"PT1H","timeZone":"US/Pacific"},"aggregations":[{"type":"doubleMax","name":"$f1","fieldName":"added"},{"type":"doubleSum","name":"$f2","fieldName":"variation"}],"intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"],"context":{"skipEmptyBuckets":true}}
             druid.query.type timeseries
-          Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+          Statistics: Num rows: 1 Data size: 52 Basic stats: COMPLETE Column stats: NONE
           Select Operator
             expressions: timestamp (type: timestamp with local time zone), $f1 (type: float), $f2 (type: double)
             outputColumnNames: _col0, _col1, _col2
-            Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+            Statistics: Num rows: 1 Data size: 52 Basic stats: COMPLETE Column stats: NONE
             ListSink
 
 PREHOOK: query: EXPLAIN
@@ -385,11 +385,11 @@ STAGE PLANS:
             druid.fieldTypes timestamp with local time zone,float,double
             druid.query.json {"queryType":"timeseries","dataSource":"wikipedia","descending":false,"granularity":{"type":"period","period":"PT1M","timeZone":"US/Pacific"},"aggregations":[{"type":"doubleMax","name":"$f1","fieldName":"added"},{"type":"doubleSum","name":"$f2","fieldName":"variation"}],"intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"],"context":{"skipEmptyBuckets":true}}
             druid.query.type timeseries
-          Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+          Statistics: Num rows: 1 Data size: 52 Basic stats: COMPLETE Column stats: NONE
           Select Operator
             expressions: timestamp (type: timestamp with local time zone), $f1 (type: float), $f2 (type: double)
             outputColumnNames: _col0, _col1, _col2
-            Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+            Statistics: Num rows: 1 Data size: 52 Basic stats: COMPLETE Column stats: NONE
             ListSink
 
 PREHOOK: query: EXPLAIN
@@ -421,11 +421,11 @@ STAGE PLANS:
             druid.fieldTypes timestamp with local time zone,float,double
             druid.query.json {"queryType":"timeseries","dataSource":"wikipedia","descending":false,"granularity":{"type":"period","period":"PT1S","timeZone":"US/Pacific"},"aggregations":[{"type":"doubleMax","name":"$f1","fieldName":"added"},{"type":"doubleSum","name":"$f2","fieldName":"variation"}],"intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"],"context":{"skipEmptyBuckets":true}}
             druid.query.type timeseries
-          Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+          Statistics: Num rows: 1 Data size: 52 Basic stats: COMPLETE Column stats: NONE
           Select Operator
             expressions: timestamp (type: timestamp with local time zone), $f1 (type: float), $f2 (type: double)
             outputColumnNames: _col0, _col1, _col2
-            Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+            Statistics: Num rows: 1 Data size: 52 Basic stats: COMPLETE Column stats: NONE
             ListSink
 
 PREHOOK: query: EXPLAIN
@@ -459,11 +459,11 @@ STAGE PLANS:
             druid.fieldTypes timestamp with local time zone,float,double
             druid.query.json {"queryType":"timeseries","dataSource":"wikipedia","descending":false,"granularity":{"type":"period","period":"PT1H","timeZone":"US/Pacific"},"filter":{"type":"selector","dimension":"robot","value":"1"},"aggregations":[{"type":"doubleMax","name":"$f1","fieldName":"added"},{"type":"doubleSum","name":"$f2","fieldName":"variation"}],"intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"],"context":{"skipEmptyBuckets":true}}
             druid.query.type timeseries
-          Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+          Statistics: Num rows: 1 Data size: 52 Basic stats: COMPLETE Column stats: NONE
           Select Operator
             expressions: timestamp (type: timestamp with local time zone), $f1 (type: float), $f2 (type: double)
             outputColumnNames: _col0, _col1, _col2
-            Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+            Statistics: Num rows: 1 Data size: 52 Basic stats: COMPLETE Column stats: NONE
             ListSink
 
 PREHOOK: query: EXPLAIN
@@ -501,11 +501,11 @@ STAGE PLANS:
             druid.fieldTypes timestamp with local time zone,float,double
             druid.query.json {"queryType":"timeseries","dataSource":"wikipedia","descending":false,"granularity":{"type":"period","period":"PT1H","timeZone":"US/Pacific"},"aggregations":[{"type":"doubleMax","name":"$f1","fieldName":"added"},{"type":"doubleSum","name":"$f2","fieldName":"variation"}],"intervals":["2010-01-01T08:00:00.000Z/2014-01-01T08:00:00.001Z"],"context":{"skipEmptyBuckets":true}}
             druid.query.type timeseries
-          Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+          Statistics: Num rows: 1 Data size: 52 Basic stats: COMPLETE Column stats: NONE
           Select Operator
             expressions: timestamp (type: timestamp with local time zone), $f1 (type: float), $f2 (type: double)
             outputColumnNames: _col0, _col1, _col2
-            Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+            Statistics: Num rows: 1 Data size: 52 Basic stats: COMPLETE Column stats: NONE
             ListSink
 
 PREHOOK: query: EXPLAIN
@@ -549,11 +549,11 @@ STAGE PLANS:
             druid.fieldTypes timestamp with local time zone,float,double
             druid.query.json {"queryType":"timeseries","dataSource":"wikipedia","descending":false,"granularity":{"type":"period","period":"PT1H","timeZone":"US/Pacific"},"aggregations":[{"type":"doubleMax","name":"$f1","fieldName":"added"},{"type":"doubleSum","name":"$f2","fieldName":"variation"}],"intervals":["2010-01-01T08:00:00.000Z/2014-01-01T08:00:00.001Z"],"context":{"skipEmptyBuckets":true}}
             druid.query.type timeseries
-          Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+          Statistics: Num rows: 1 Data size: 52 Basic stats: COMPLETE Column stats: NONE
           Select Operator
             expressions: timestamp (type: timestamp with local time zone), $f1 (type: float), $f2 (type: double)
             outputColumnNames: _col0, _col1, _col2
-            Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+            Statistics: Num rows: 1 Data size: 52 Basic stats: COMPLETE Column stats: NONE
             ListSink
 
 PREHOOK: query: EXPLAIN SELECT count(`__time`) from druid_table_1_n3
@@ -579,11 +579,11 @@ STAGE PLANS:
             druid.fieldTypes bigint
             druid.query.json {"queryType":"timeseries","dataSource":"wikipedia","descending":false,"granularity":"all","aggregations":[{"type":"count","name":"$f0"}],"intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"],"context":{"skipEmptyBuckets":false}}
             druid.query.type timeseries
-          Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
           Select Operator
             expressions: $f0 (type: bigint)
             outputColumnNames: _col0
-            Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+            Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
             ListSink
 
 PREHOOK: query: EXPLAIN SELECT count(`__time`) from druid_table_1_n3 where `__time` <= '2010-01-01 00:00:00 UTC'
@@ -609,11 +609,11 @@ STAGE PLANS:
             druid.fieldTypes bigint
             druid.query.json {"queryType":"timeseries","dataSource":"wikipedia","descending":false,"granularity":"all","aggregations":[{"type":"count","name":"$f0"}],"intervals":["1900-01-01T00:00:00.000Z/2010-01-01T00:00:00.001Z"],"context":{"skipEmptyBuckets":false}}
             druid.query.type timeseries
-          Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
           Select Operator
             expressions: $f0 (type: bigint)
             outputColumnNames: _col0
-            Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+            Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
             ListSink
 
 PREHOOK: query: EXPLAIN SELECT count(`__time`) from druid_table_1_n3 where `__time` >= '2010-01-01 00:00:00'
@@ -639,11 +639,11 @@ STAGE PLANS:
             druid.fieldTypes bigint
             druid.query.json {"queryType":"timeseries","dataSource":"wikipedia","descending":false,"granularity":"all","aggregations":[{"type":"count","name":"$f0"}],"intervals":["2010-01-01T08:00:00.000Z/3000-01-01T00:00:00.000Z"],"context":{"skipEmptyBuckets":false}}
             druid.query.type timeseries
-          Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
           Select Operator
             expressions: $f0 (type: bigint)
             outputColumnNames: _col0
-            Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+            Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
             ListSink
 
 PREHOOK: query: EXPLAIN SELECT count(`__time`) from druid_table_1_n3 where `__time` <= '2010-01-01 00:00:00' OR  `__time` <= '2012-03-01 00:00:00'
@@ -669,10 +669,10 @@ STAGE PLANS:
             druid.fieldTypes bigint
             druid.query.json {"queryType":"timeseries","dataSource":"wikipedia","descending":false,"granularity":"all","aggregations":[{"type":"count","name":"$f0"}],"intervals":["1900-01-01T00:00:00.000Z/2012-03-01T08:00:00.001Z"],"context":{"skipEmptyBuckets":false}}
             druid.query.type timeseries
-          Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
           Select Operator
             expressions: $f0 (type: bigint)
             outputColumnNames: _col0
-            Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+            Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
             ListSink
 

http://git-wip-us.apache.org/repos/asf/hive/blob/268a6e5a/ql/src/test/results/clientpositive/druid_topn.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/druid_topn.q.out b/ql/src/test/results/clientpositive/druid_topn.q.out
index 755e977..b0b435f 100644
--- a/ql/src/test/results/clientpositive/druid_topn.q.out
+++ b/ql/src/test/results/clientpositive/druid_topn.q.out
@@ -94,11 +94,11 @@ STAGE PLANS:
             druid.fieldTypes string,float,double
             druid.query.json {"queryType":"groupBy","dataSource":"wikipedia","granularity":"all","dimensions":[{"type":"default","dimension":"robot","outputName":"robot","outputType":"STRING"}],"limitSpec":{"type":"default","limit":100,"columns":[{"dimension":"$f1","direction":"descending","dimensionOrder":"numeric"}]},"aggregations":[{"type":"doubleMax","name":"$f1","fieldName":"added"},{"type":"doubleSum","name":"$f2","fieldName":"variation"}],"intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"]}
             druid.query.type groupBy
-          Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+          Statistics: Num rows: 1 Data size: 196 Basic stats: COMPLETE Column stats: NONE
           Select Operator
             expressions: robot (type: string), $f1 (type: float), $f2 (type: double)
             outputColumnNames: _col0, _col1, _col2
-            Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+            Statistics: Num rows: 1 Data size: 196 Basic stats: COMPLETE Column stats: NONE
             ListSink
 
 PREHOOK: query: EXPLAIN
@@ -134,11 +134,11 @@ STAGE PLANS:
             druid.fieldTypes timestamp with local time zone,string,float,double
             druid.query.json {"queryType":"groupBy","dataSource":"wikipedia","granularity":"all","dimensions":[{"type":"extraction","dimension":"__time","outputName":"extract","extractionFn":{"type":"timeFormat","format":"yyyy-MM-dd'T'HH:mm:ss.SSS'Z'","timeZone":"UTC"}},{"type":"default","dimension":"robot","outputName":"robot","outputType":"STRING"}],"limitSpec":{"type":"default","limit":100,"columns":[{"dimension":"$f3","direction":"descending","dimensionOrder":"numeric"}]},"aggregations":[{"type":"doubleMax","name":"$f2","fieldName":"added"},{"type":"doubleSum","name":"$f3","fieldName":"variation"}],"intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"]}
             druid.query.type groupBy
-          Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+          Statistics: Num rows: 1 Data size: 236 Basic stats: COMPLETE Column stats: NONE
           Select Operator
             expressions: robot (type: string), extract (type: timestamp with local time zone), $f2 (type: float), $f3 (type: double)
             outputColumnNames: _col0, _col1, _col2, _col3
-            Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+            Statistics: Num rows: 1 Data size: 236 Basic stats: COMPLETE Column stats: NONE
             ListSink
 
 PREHOOK: query: EXPLAIN
@@ -174,11 +174,11 @@ STAGE PLANS:
             druid.fieldTypes string,timestamp with local time zone,float,double
             druid.query.json {"queryType":"groupBy","dataSource":"wikipedia","granularity":"all","dimensions":[{"type":"default","dimension":"robot","outputName":"robot","outputType":"STRING"},{"type":"extraction","dimension":"__time","outputName":"floor_year","extractionFn":{"type":"timeFormat","format":"yyyy-MM-dd'T'HH:mm:ss.SSS'Z'","granularity":{"type":"period","period":"P1Y","timeZone":"US/Pacific"},"timeZone":"UTC","locale":"und"}}],"limitSpec":{"type":"default","limit":10,"columns":[{"dimension":"$f3","direction":"descending","dimensionOrder":"numeric"}]},"aggregations":[{"type":"doubleMax","name":"$f2","fieldName":"added"},{"type":"doubleSum","name":"$f3","fieldName":"variation"}],"intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"]}
             druid.query.type groupBy
-          Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+          Statistics: Num rows: 1 Data size: 236 Basic stats: COMPLETE Column stats: NONE
           Select Operator
             expressions: robot (type: string), floor_year (type: timestamp with local time zone), $f2 (type: float), $f3 (type: double)
             outputColumnNames: _col0, _col1, _col2, _col3
-            Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+            Statistics: Num rows: 1 Data size: 236 Basic stats: COMPLETE Column stats: NONE
             ListSink
 
 PREHOOK: query: EXPLAIN
@@ -214,11 +214,11 @@ STAGE PLANS:
             druid.fieldTypes string,timestamp with local time zone,float,double
             druid.query.json {"queryType":"groupBy","dataSource":"wikipedia","granularity":"all","dimensions":[{"type":"default","dimension":"robot","outputName":"robot","outputType":"STRING"},{"type":"extraction","dimension":"__time","outputName":"floor_month","extractionFn":{"type":"timeFormat","format":"yyyy-MM-dd'T'HH:mm:ss.SSS'Z'","granularity":{"type":"period","period":"P1M","timeZone":"US/Pacific"},"timeZone":"UTC","locale":"und"}}],"limitSpec":{"type":"default","limit":10,"columns":[{"dimension":"$f3","direction":"ascending","dimensionOrder":"numeric"}]},"aggregations":[{"type":"doubleMax","name":"$f2","fieldName":"added"},{"type":"doubleSum","name":"$f3","fieldName":"variation"}],"intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"]}
             druid.query.type groupBy
-          Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+          Statistics: Num rows: 1 Data size: 236 Basic stats: COMPLETE Column stats: NONE
           Select Operator
             expressions: robot (type: string), floor_month (type: timestamp with local time zone), $f2 (type: float), $f3 (type: double)
             outputColumnNames: _col0, _col1, _col2, _col3
-            Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+            Statistics: Num rows: 1 Data size: 236 Basic stats: COMPLETE Column stats: NONE
             ListSink
 
 PREHOOK: query: EXPLAIN
@@ -254,11 +254,11 @@ STAGE PLANS:
             druid.fieldTypes string,string,timestamp with local time zone,float,double
             druid.query.json {"queryType":"groupBy","dataSource":"wikipedia","granularity":"all","dimensions":[{"type":"default","dimension":"robot","outputName":"robot","outputType":"STRING"},{"type":"default","dimension":"namespace","outputName":"namespace","outputType":"STRING"},{"type":"extraction","dimension":"__time","outputName":"floor_month","extractionFn":{"type":"timeFormat","format":"yyyy-MM-dd'T'HH:mm:ss.SSS'Z'","granularity":{"type":"period","period":"P1M","timeZone":"US/Pacific"},"timeZone":"UTC","locale":"und"}}],"limitSpec":{"type":"default","limit":10,"columns":[{"dimension":"$f4","direction":"descending","dimensionOrder":"numeric"},{"dimension":"$f3","direction":"descending","dimensionOrder":"numeric"}]},"aggregations":[{"type":"doubleMax","name":"$f3","fieldName":"added"},{"type":"doubleSum","name":"$f4","fieldName":"variation"}],"intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"]}
             druid.query.type groupBy
-          Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+          Statistics: Num rows: 1 Data size: 236 Basic stats: COMPLETE Column stats: NONE
           Select Operator
             expressions: robot (type: string), floor_month (type: timestamp with local time zone), $f3 (type: float), $f4 (type: double)
             outputColumnNames: _col0, _col1, _col2, _col3
-            Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+            Statistics: Num rows: 1 Data size: 236 Basic stats: COMPLETE Column stats: NONE
             ListSink
 
 PREHOOK: query: EXPLAIN
@@ -294,11 +294,11 @@ STAGE PLANS:
             druid.fieldTypes string,string,timestamp with local time zone,float,double
             druid.query.json {"queryType":"groupBy","dataSource":"wikipedia","granularity":"all","dimensions":[{"type":"default","dimension":"robot","outputName":"robot","outputType":"STRING"},{"type":"default","dimension":"namespace","outputName":"namespace","outputType":"STRING"},{"type":"extraction","dimension":"__time","outputName":"floor_month","extractionFn":{"type":"timeFormat","format":"yyyy-MM-dd'T'HH:mm:ss.SSS'Z'","granularity":{"type":"period","period":"P1M","timeZone":"US/Pacific"},"timeZone":"UTC","locale":"und"}}],"limitSpec":{"type":"default","limit":10,"columns":[{"dimension":"robot","direction":"ascending","dimensionOrder":"lexicographic"},{"dimension":"$f3","direction":"descending","dimensionOrder":"numeric"}]},"aggregations":[{"type":"doubleMax","name":"$f3","fieldName":"added"},{"type":"doubleSum","name":"$f4","fieldName":"variation"}],"intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"]}
             druid.query.type groupBy
-          Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+          Statistics: Num rows: 1 Data size: 236 Basic stats: COMPLETE Column stats: NONE
           Select Operator
             expressions: robot (type: string), floor_month (type: timestamp with local time zone), $f3 (type: float), $f4 (type: double)
             outputColumnNames: _col0, _col1, _col2, _col3
-            Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+            Statistics: Num rows: 1 Data size: 236 Basic stats: COMPLETE Column stats: NONE
             ListSink
 
 PREHOOK: query: EXPLAIN
@@ -336,11 +336,11 @@ STAGE PLANS:
             druid.fieldTypes timestamp with local time zone,float,double
             druid.query.json {"queryType":"groupBy","dataSource":"wikipedia","granularity":"all","dimensions":[{"type":"extraction","dimension":"__time","outputName":"floor_year","extractionFn":{"type":"timeFormat","format":"yyyy-MM-dd'T'HH:mm:ss.SSS'Z'","granularity":{"type":"period","period":"P1Y","timeZone":"US/Pacific"},"timeZone":"UTC","locale":"und"}}],"limitSpec":{"type":"default","limit":10,"columns":[{"dimension":"$f2","direction":"ascending","dimensionOrder":"numeric"}]},"filter":{"type":"selector","dimension":"robot","value":"1"},"aggregations":[{"type":"doubleMax","name":"$f1_0","fieldName":"added"},{"type":"doubleSum","name":"$f2","fieldName":"variation"}],"intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"]}
             druid.query.type groupBy
-          Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+          Statistics: Num rows: 1 Data size: 52 Basic stats: COMPLETE Column stats: NONE
           Select Operator
             expressions: '1' (type: string), floor_year (type: timestamp with local time zone), $f1_0 (type: float), $f2 (type: double)
             outputColumnNames: _col0, _col1, _col2, _col3
-            Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+            Statistics: Num rows: 1 Data size: 52 Basic stats: COMPLETE Column stats: NONE
             ListSink
 
 PREHOOK: query: EXPLAIN
@@ -382,10 +382,10 @@ STAGE PLANS:
             druid.fieldTypes string,timestamp with local time zone,float,double
             druid.query.json {"queryType":"groupBy","dataSource":"wikipedia","granularity":"all","dimensions":[{"type":"default","dimension":"robot","outputName":"robot","outputType":"STRING"},{"type":"extraction","dimension":"__time","outputName":"floor_hour","extractionFn":{"type":"timeFormat","format":"yyyy-MM-dd'T'HH:mm:ss.SSS'Z'","granularity":{"type":"period","period":"PT1H","timeZone":"US/Pacific"},"timeZone":"UTC","locale":"und"}}],"limitSpec":{"type":"default","limit":100,"columns":[{"dimension":"$f2","direction":"ascending","dimensionOrder":"numeric"}]},"aggregations":[{"type":"doubleMax","name":"$f2","fieldName":"added"},{"type":"doubleSum","name":"$f3","fieldName":"variation"}],"intervals":["2010-01-01T08:00:00.000Z/2014-01-01T08:00:00.001Z"]}
             druid.query.type groupBy
-          Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+          Statistics: Num rows: 1 Data size: 236 Basic stats: COMPLETE Column stats: NONE
           Select Operator
             expressions: robot (type: string), floor_hour (type: timestamp with local time zone), $f2 (type: float), $f3 (type: double)
             outputColumnNames: _col0, _col1, _col2, _col3
-            Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+            Statistics: Num rows: 1 Data size: 236 Basic stats: COMPLETE Column stats: NONE
             ListSink
 

http://git-wip-us.apache.org/repos/asf/hive/blob/268a6e5a/ql/src/test/results/clientpositive/dynamic_partition_skip_default.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/dynamic_partition_skip_default.q.out b/ql/src/test/results/clientpositive/dynamic_partition_skip_default.q.out
index f76b24e..9ec9dca 100644
--- a/ql/src/test/results/clientpositive/dynamic_partition_skip_default.q.out
+++ b/ql/src/test/results/clientpositive/dynamic_partition_skip_default.q.out
@@ -114,12 +114,12 @@ STAGE PLANS:
         TableScan
           alias: dynamic_part_table
           filterExpr: ((partcol1 = '1') and (partcol2 = '1')) (type: boolean)
-          Statistics: Num rows: 1 Data size: 1 Basic stats: COMPLETE Column stats: NONE
+          Statistics: Num rows: 1 Data size: 85 Basic stats: COMPLETE Column stats: COMPLETE
           GatherStats: false
           Select Operator
             expressions: intcol (type: string)
             outputColumnNames: _col0
-            Statistics: Num rows: 1 Data size: 1 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 1 Data size: 85 Basic stats: COMPLETE Column stats: COMPLETE
             ListSink
 
 PREHOOK: query: explain extended select intcol from dynamic_part_table where partcol1='1' and partcol2='1'
@@ -194,12 +194,12 @@ STAGE PLANS:
         TableScan
           alias: dynamic_part_table
           filterExpr: ((partcol1 = '1') and (partcol2 = '1')) (type: boolean)
-          Statistics: Num rows: 1 Data size: 1 Basic stats: COMPLETE Column stats: NONE
+          Statistics: Num rows: 1 Data size: 85 Basic stats: COMPLETE Column stats: COMPLETE
           GatherStats: false
           Select Operator
             expressions: intcol (type: string)
             outputColumnNames: _col0
-            Statistics: Num rows: 1 Data size: 1 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 1 Data size: 85 Basic stats: COMPLETE Column stats: COMPLETE
             ListSink
 
 PREHOOK: query: explain extended select intcol from dynamic_part_table where (partcol1='1' and partcol2='1')or (partcol1='1' and partcol2='__HIVE_DEFAULT_PARTITION__')
@@ -320,11 +320,11 @@ STAGE PLANS:
         TableScan
           alias: dynamic_part_table
           filterExpr: ((partcol2) IN ('1', '__HIVE_DEFAULT_PARTITION__') and (partcol1 = '1')) (type: boolean)
-          Statistics: Num rows: 2 Data size: 2 Basic stats: COMPLETE Column stats: NONE
+          Statistics: Num rows: 2 Data size: 170 Basic stats: COMPLETE Column stats: PARTIAL
           GatherStats: false
           Select Operator
             expressions: intcol (type: string)
             outputColumnNames: _col0
-            Statistics: Num rows: 2 Data size: 2 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 2 Data size: 170 Basic stats: COMPLETE Column stats: PARTIAL
             ListSink
 

http://git-wip-us.apache.org/repos/asf/hive/blob/268a6e5a/ql/src/test/results/clientpositive/dynpart_sort_optimization_acid2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/dynpart_sort_optimization_acid2.q.out b/ql/src/test/results/clientpositive/dynpart_sort_optimization_acid2.q.out
index 6c71200..5526d8d 100644
--- a/ql/src/test/results/clientpositive/dynpart_sort_optimization_acid2.q.out
+++ b/ql/src/test/results/clientpositive/dynpart_sort_optimization_acid2.q.out
@@ -40,11 +40,11 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: srcpart
-            Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 2000 Data size: 1092000 Basic stats: COMPLETE Column stats: COMPLETE
             Select Operator
               expressions: key (type: string), value (type: string), ds (type: string), hr (type: string)
               outputColumnNames: _col0, _col1, _col2, _col3
-              Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 2000 Data size: 1092000 Basic stats: COMPLETE Column stats: COMPLETE
               Reduce Output Operator
                 key expressions: _col2 (type: string), _col3 (type: string), _bucket_number (type: string), _col1 (type: string)
                 sort order: ++++
@@ -58,7 +58,7 @@ STAGE PLANS:
           File Output Operator
             compressed: false
             Dp Sort State: PARTITION_BUCKET_SORTED
-            Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 2000 Data size: 1092000 Basic stats: COMPLETE Column stats: COMPLETE
             table:
                 input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
                 output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat

http://git-wip-us.apache.org/repos/asf/hive/blob/268a6e5a/ql/src/test/results/clientpositive/empty_join.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/empty_join.q.out b/ql/src/test/results/clientpositive/empty_join.q.out
index 9d8ff4a..f17be28 100644
--- a/ql/src/test/results/clientpositive/empty_join.q.out
+++ b/ql/src/test/results/clientpositive/empty_join.q.out
@@ -80,14 +80,14 @@ STAGE PLANS:
           TableScan
             alias: t2
             filterExpr: id is not null (type: boolean)
-            Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+            Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
               predicate: id is not null (type: boolean)
-              Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+              Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: id (type: int)
                 outputColumnNames: _col0
-                Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+                Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE
                 HashTable Sink Operator
                   keys:
                     0 _col0 (type: int)
@@ -97,14 +97,14 @@ STAGE PLANS:
           TableScan
             alias: t3
             filterExpr: id is not null (type: boolean)
-            Statistics: Num rows: 1 Data size: 1 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
             Filter Operator
               predicate: id is not null (type: boolean)
-              Statistics: Num rows: 1 Data size: 1 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
               Select Operator
                 expressions: id (type: int)
                 outputColumnNames: _col0
-                Statistics: Num rows: 1 Data size: 1 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
                 HashTable Sink Operator
                   keys:
                     0 _col0 (type: int)
@@ -117,14 +117,14 @@ STAGE PLANS:
           TableScan
             alias: t1
             filterExpr: id is not null (type: boolean)
-            Statistics: Num rows: 1 Data size: 1 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
             Filter Operator
               predicate: id is not null (type: boolean)
-              Statistics: Num rows: 1 Data size: 1 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
               Select Operator
                 expressions: id (type: int)
                 outputColumnNames: _col0
-                Statistics: Num rows: 1 Data size: 1 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
                 Map Join Operator
                   condition map:
                        Left Outer Join 0 to 1
@@ -134,10 +134,10 @@ STAGE PLANS:
                     1 _col0 (type: int)
                     2 _col0 (type: int)
                   outputColumnNames: _col0, _col1, _col2
-                  Statistics: Num rows: 2 Data size: 2 Basic stats: PARTIAL Column stats: NONE
+                  Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                   File Output Operator
                     compressed: false
-                    Statistics: Num rows: 2 Data size: 2 Basic stats: PARTIAL Column stats: NONE
+                    Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                     table:
                         input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                         output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat

http://git-wip-us.apache.org/repos/asf/hive/blob/268a6e5a/ql/src/test/results/clientpositive/encrypted/encryption_join_unencrypted_tbl.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/encrypted/encryption_join_unencrypted_tbl.q.out b/ql/src/test/results/clientpositive/encrypted/encryption_join_unencrypted_tbl.q.out
index b6d726e..e5e6fd1 100644
--- a/ql/src/test/results/clientpositive/encrypted/encryption_join_unencrypted_tbl.q.out
+++ b/ql/src/test/results/clientpositive/encrypted/encryption_join_unencrypted_tbl.q.out
@@ -560,44 +560,44 @@ STAGE PLANS:
           TableScan
             alias: t1
             filterExpr: key is not null (type: boolean)
-            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
             GatherStats: false
             Filter Operator
               isSamplingPred: false
               predicate: key is not null (type: boolean)
-              Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
               Select Operator
                 expressions: key (type: string), value (type: string), UDFToDouble(key) (type: double)
                 outputColumnNames: _col0, _col1, _col2
-                Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 500 Data size: 93000 Basic stats: COMPLETE Column stats: COMPLETE
                 Reduce Output Operator
                   key expressions: _col2 (type: double)
                   null sort order: a
                   sort order: +
                   Map-reduce partition columns: _col2 (type: double)
-                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 500 Data size: 93000 Basic stats: COMPLETE Column stats: COMPLETE
                   tag: 0
                   value expressions: _col0 (type: string), _col1 (type: string)
                   auto parallelism: false
           TableScan
             alias: t2
             filterExpr: key is not null (type: boolean)
-            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 500 Data size: 47500 Basic stats: COMPLETE Column stats: COMPLETE
             GatherStats: false
             Filter Operator
               isSamplingPred: false
               predicate: key is not null (type: boolean)
-              Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 500 Data size: 47500 Basic stats: COMPLETE Column stats: COMPLETE
               Select Operator
                 expressions: key (type: int), value (type: string), UDFToDouble(key) (type: double)
                 outputColumnNames: _col0, _col1, _col2
-                Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 500 Data size: 51500 Basic stats: COMPLETE Column stats: COMPLETE
                 Reduce Output Operator
                   key expressions: _col2 (type: double)
                   null sort order: a
                   sort order: +
                   Map-reduce partition columns: _col2 (type: double)
-                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 500 Data size: 51500 Basic stats: COMPLETE Column stats: COMPLETE
                   tag: 1
                   value expressions: _col0 (type: int), _col1 (type: string)
                   auto parallelism: false
@@ -717,17 +717,17 @@ STAGE PLANS:
             0 _col2 (type: double)
             1 _col2 (type: double)
           outputColumnNames: _col0, _col1, _col3, _col4
-          Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
+          Statistics: Num rows: 791 Data size: 215943 Basic stats: COMPLETE Column stats: COMPLETE
           Select Operator
             expressions: _col0 (type: string), _col1 (type: string), _col3 (type: int), _col4 (type: string)
             outputColumnNames: _col0, _col1, _col2, _col3
-            Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 791 Data size: 215943 Basic stats: COMPLETE Column stats: COMPLETE
             File Output Operator
               compressed: false
               GlobalTableId: 0
               directory: hdfs://### HDFS PATH ###
               NumFilesPerFileSink: 1
-              Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 791 Data size: 215943 Basic stats: COMPLETE Column stats: COMPLETE
               Stats Publishing Key Prefix: hdfs://### HDFS PATH ###
               table:
                   input format: org.apache.hadoop.mapred.SequenceFileInputFormat

http://git-wip-us.apache.org/repos/asf/hive/blob/268a6e5a/ql/src/test/results/clientpositive/encrypted/encryption_join_with_different_encryption_keys.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/encrypted/encryption_join_with_different_encryption_keys.q.out b/ql/src/test/results/clientpositive/encrypted/encryption_join_with_different_encryption_keys.q.out
index 3d55e2d..34f3850 100644
--- a/ql/src/test/results/clientpositive/encrypted/encryption_join_with_different_encryption_keys.q.out
+++ b/ql/src/test/results/clientpositive/encrypted/encryption_join_with_different_encryption_keys.q.out
@@ -78,44 +78,44 @@ STAGE PLANS:
           TableScan
             alias: t1
             filterExpr: key is not null (type: boolean)
-            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 500 Data size: 89488 Basic stats: COMPLETE Column stats: NONE
             GatherStats: false
             Filter Operator
               isSamplingPred: false
               predicate: key is not null (type: boolean)
-              Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 475 Data size: 85013 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: int), value (type: string)
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 475 Data size: 85013 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
                   key expressions: _col0 (type: int)
                   null sort order: a
                   sort order: +
                   Map-reduce partition columns: _col0 (type: int)
-                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 475 Data size: 85013 Basic stats: COMPLETE Column stats: NONE
                   tag: 0
                   value expressions: _col1 (type: string)
                   auto parallelism: false
           TableScan
             alias: t2
             filterExpr: key is not null (type: boolean)
-            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 500 Data size: 89488 Basic stats: COMPLETE Column stats: NONE
             GatherStats: false
             Filter Operator
               isSamplingPred: false
               predicate: key is not null (type: boolean)
-              Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 475 Data size: 85013 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: int), value (type: string)
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 475 Data size: 85013 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
                   key expressions: _col0 (type: int)
                   null sort order: a
                   sort order: +
                   Map-reduce partition columns: _col0 (type: int)
-                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 475 Data size: 85013 Basic stats: COMPLETE Column stats: NONE
                   tag: 1
                   value expressions: _col1 (type: string)
                   auto parallelism: false
@@ -235,13 +235,13 @@ STAGE PLANS:
             0 _col0 (type: int)
             1 _col0 (type: int)
           outputColumnNames: _col0, _col1, _col2, _col3
-          Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
+          Statistics: Num rows: 522 Data size: 93514 Basic stats: COMPLETE Column stats: NONE
           File Output Operator
             compressed: false
             GlobalTableId: 0
             directory: hdfs://### HDFS PATH ###
             NumFilesPerFileSink: 1
-            Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 522 Data size: 93514 Basic stats: COMPLETE Column stats: NONE
             Stats Publishing Key Prefix: hdfs://### HDFS PATH ###
             table:
                 input format: org.apache.hadoop.mapred.SequenceFileInputFormat

http://git-wip-us.apache.org/repos/asf/hive/blob/268a6e5a/ql/src/test/results/clientpositive/erasurecoding/erasure_explain.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/erasurecoding/erasure_explain.q.out b/ql/src/test/results/clientpositive/erasurecoding/erasure_explain.q.out
index c201c92..adad89e 100644
--- a/ql/src/test/results/clientpositive/erasurecoding/erasure_explain.q.out
+++ b/ql/src/test/results/clientpositive/erasurecoding/erasure_explain.q.out
@@ -91,11 +91,11 @@ STAGE PLANS:
       Processor Tree:
         TableScan
           alias: srcpart
-          Statistics: Num rows: 2000 Data size: 21248 Erasure files: 4 Basic stats: COMPLETE Column stats: NONE
+          Statistics: Num rows: 2000 Data size: 356000 Erasure files: 4 Basic stats: COMPLETE Column stats: COMPLETE
           Select Operator
             expressions: key (type: string), value (type: string)
             outputColumnNames: _col0, _col1
-            Statistics: Num rows: 2000 Data size: 21248 Erasure files: 4 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 2000 Data size: 356000 Erasure files: 4 Basic stats: COMPLETE Column stats: COMPLETE
             ListSink
 
 PREHOOK: query: explain extended select key, value from srcpart
@@ -327,12 +327,12 @@ STAGE PLANS:
       Processor Tree:
         TableScan
           alias: srcpart
-          Statistics: Num rows: 2000 Data size: 21248 Erasure files: 4 Basic stats: COMPLETE Column stats: NONE
+          Statistics: Num rows: 2000 Data size: 356000 Erasure files: 4 Basic stats: COMPLETE Column stats: COMPLETE
           GatherStats: false
           Select Operator
             expressions: key (type: string), value (type: string)
             outputColumnNames: _col0, _col1
-            Statistics: Num rows: 2000 Data size: 21248 Erasure files: 4 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 2000 Data size: 356000 Erasure files: 4 Basic stats: COMPLETE Column stats: COMPLETE
             ListSink
 
 PREHOOK: query: show table extended like src
@@ -408,11 +408,11 @@ STAGE PLANS:
       Processor Tree:
         TableScan
           alias: src
-          Statistics: Num rows: 500 Data size: 5312 Erasure files: 1 Basic stats: COMPLETE Column stats: NONE
+          Statistics: Num rows: 500 Data size: 89000 Erasure files: 1 Basic stats: COMPLETE Column stats: COMPLETE
           Select Operator
             expressions: key (type: string), value (type: string)
             outputColumnNames: _col0, _col1
-            Statistics: Num rows: 500 Data size: 5312 Erasure files: 1 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 500 Data size: 89000 Erasure files: 1 Basic stats: COMPLETE Column stats: COMPLETE
             ListSink
 
 PREHOOK: query: explain extended select key, value from src
@@ -435,11 +435,11 @@ STAGE PLANS:
       Processor Tree:
         TableScan
           alias: src
-          Statistics: Num rows: 500 Data size: 5312 Erasure files: 1 Basic stats: COMPLETE Column stats: NONE
+          Statistics: Num rows: 500 Data size: 89000 Erasure files: 1 Basic stats: COMPLETE Column stats: COMPLETE
           GatherStats: false
           Select Operator
             expressions: key (type: string), value (type: string)
             outputColumnNames: _col0, _col1
-            Statistics: Num rows: 500 Data size: 5312 Erasure files: 1 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 500 Data size: 89000 Erasure files: 1 Basic stats: COMPLETE Column stats: COMPLETE
             ListSink
 

http://git-wip-us.apache.org/repos/asf/hive/blob/268a6e5a/ql/src/test/results/clientpositive/escape_clusterby1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/escape_clusterby1.q.out b/ql/src/test/results/clientpositive/escape_clusterby1.q.out
index 116b0fd..e9ddf8a 100644
--- a/ql/src/test/results/clientpositive/escape_clusterby1.q.out
+++ b/ql/src/test/results/clientpositive/escape_clusterby1.q.out
@@ -18,25 +18,25 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: src
-            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
             Select Operator
               expressions: key (type: string), value (type: string)
               outputColumnNames: _col0, _col1
-              Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
               Reduce Output Operator
                 key expressions: _col0 (type: string), _col1 (type: string)
                 sort order: ++
                 Map-reduce partition columns: _col0 (type: string), _col1 (type: string)
-                Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
       Execution mode: vectorized
       Reduce Operator Tree:
         Select Operator
           expressions: KEY.reducesinkkey0 (type: string), KEY.reducesinkkey1 (type: string)
           outputColumnNames: _col0, _col1
-          Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+          Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
           File Output Operator
             compressed: false
-            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
             table:
                 input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                 output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@@ -68,25 +68,25 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: src
-            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
             Select Operator
               expressions: key (type: string), value (type: string)
               outputColumnNames: _col0, _col1
-              Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
               Reduce Output Operator
                 key expressions: _col0 (type: string), _col1 (type: string)
                 sort order: ++
                 Map-reduce partition columns: _col0 (type: string), _col1 (type: string)
-                Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
       Execution mode: vectorized
       Reduce Operator Tree:
         Select Operator
           expressions: KEY.reducesinkkey0 (type: string), KEY.reducesinkkey1 (type: string)
           outputColumnNames: _col0, _col1
-          Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+          Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
           File Output Operator
             compressed: false
-            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
             table:
                 input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                 output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat

http://git-wip-us.apache.org/repos/asf/hive/blob/268a6e5a/ql/src/test/results/clientpositive/escape_distributeby1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/escape_distributeby1.q.out b/ql/src/test/results/clientpositive/escape_distributeby1.q.out
index 1e006f7..40e9abf 100644
--- a/ql/src/test/results/clientpositive/escape_distributeby1.q.out
+++ b/ql/src/test/results/clientpositive/escape_distributeby1.q.out
@@ -18,25 +18,25 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: src
-            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
             Select Operator
               expressions: key (type: string), value (type: string)
               outputColumnNames: _col0, _col1
-              Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
               Reduce Output Operator
                 sort order: 
                 Map-reduce partition columns: _col0 (type: string), _col1 (type: string)
-                Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
                 value expressions: _col0 (type: string), _col1 (type: string)
       Execution mode: vectorized
       Reduce Operator Tree:
         Select Operator
           expressions: VALUE._col0 (type: string), VALUE._col1 (type: string)
           outputColumnNames: _col0, _col1
-          Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+          Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
           File Output Operator
             compressed: false
-            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
             table:
                 input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                 output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@@ -68,25 +68,25 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: src
-            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
             Select Operator
               expressions: key (type: string), value (type: string)
               outputColumnNames: _col0, _col1
-              Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
               Reduce Output Operator
                 sort order: 
                 Map-reduce partition columns: _col0 (type: string), _col1 (type: string)
-                Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
                 value expressions: _col0 (type: string), _col1 (type: string)
       Execution mode: vectorized
       Reduce Operator Tree:
         Select Operator
           expressions: VALUE._col0 (type: string), VALUE._col1 (type: string)
           outputColumnNames: _col0, _col1
-          Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+          Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
           File Output Operator
             compressed: false
-            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
             table:
                 input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                 output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat