Posted to commits@hive.apache.org by ha...@apache.org on 2017/11/07 22:33:38 UTC

[01/22] hive git commit: HIVE-16827 : Merge stats task and column stats task into a single task (Zoltan Haindrich via Ashutosh Chauhan)

Repository: hive
Updated Branches:
  refs/heads/master 945404273 -> ec9cc0bc2


http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/vector_outer_reference_windowed.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_outer_reference_windowed.q.out b/ql/src/test/results/clientpositive/vector_outer_reference_windowed.q.out
index 2971038..cc70cef 100644
--- a/ql/src/test/results/clientpositive/vector_outer_reference_windowed.q.out
+++ b/ql/src/test/results/clientpositive/vector_outer_reference_windowed.q.out
@@ -177,55 +177,67 @@ c1	c2
 PREHOOK: query: ANALYZE TABLE e011_01 COMPUTE STATISTICS FOR COLUMNS
 PREHOOK: type: QUERY
 PREHOOK: Input: default@e011_01
+PREHOOK: Output: default@e011_01
 #### A masked pattern was here ####
 POSTHOOK: query: ANALYZE TABLE e011_01 COMPUTE STATISTICS FOR COLUMNS
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@e011_01
+POSTHOOK: Output: default@e011_01
 #### A masked pattern was here ####
 _c0	_c1
 PREHOOK: query: ANALYZE TABLE e011_02 COMPUTE STATISTICS FOR COLUMNS
 PREHOOK: type: QUERY
 PREHOOK: Input: default@e011_02
+PREHOOK: Output: default@e011_02
 #### A masked pattern was here ####
 POSTHOOK: query: ANALYZE TABLE e011_02 COMPUTE STATISTICS FOR COLUMNS
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@e011_02
+POSTHOOK: Output: default@e011_02
 #### A masked pattern was here ####
 _c0	_c1
 PREHOOK: query: ANALYZE TABLE e011_03 COMPUTE STATISTICS FOR COLUMNS
 PREHOOK: type: QUERY
 PREHOOK: Input: default@e011_03
+PREHOOK: Output: default@e011_03
 #### A masked pattern was here ####
 POSTHOOK: query: ANALYZE TABLE e011_03 COMPUTE STATISTICS FOR COLUMNS
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@e011_03
+POSTHOOK: Output: default@e011_03
 #### A masked pattern was here ####
 _c0	_c1
 PREHOOK: query: ANALYZE TABLE e011_01_small COMPUTE STATISTICS FOR COLUMNS
 PREHOOK: type: QUERY
 PREHOOK: Input: default@e011_01_small
+PREHOOK: Output: default@e011_01_small
 #### A masked pattern was here ####
 POSTHOOK: query: ANALYZE TABLE e011_01_small COMPUTE STATISTICS FOR COLUMNS
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@e011_01_small
+POSTHOOK: Output: default@e011_01_small
 #### A masked pattern was here ####
 _c0	_c1
 PREHOOK: query: ANALYZE TABLE e011_02_small COMPUTE STATISTICS FOR COLUMNS
 PREHOOK: type: QUERY
 PREHOOK: Input: default@e011_02_small
+PREHOOK: Output: default@e011_02_small
 #### A masked pattern was here ####
 POSTHOOK: query: ANALYZE TABLE e011_02_small COMPUTE STATISTICS FOR COLUMNS
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@e011_02_small
+POSTHOOK: Output: default@e011_02_small
 #### A masked pattern was here ####
 _c0	_c1
 PREHOOK: query: ANALYZE TABLE e011_03_small COMPUTE STATISTICS FOR COLUMNS
 PREHOOK: type: QUERY
 PREHOOK: Input: default@e011_03_small
+PREHOOK: Output: default@e011_03_small
 #### A masked pattern was here ####
 POSTHOOK: query: ANALYZE TABLE e011_03_small COMPUTE STATISTICS FOR COLUMNS
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@e011_03_small
+POSTHOOK: Output: default@e011_03_small
 #### A masked pattern was here ####
 _c0	_c1
 PREHOOK: query: explain vectorization detail
@@ -250,7 +262,7 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: e011_01
-            Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 4 Data size: 12 Basic stats: COMPLETE Column stats: NONE
             TableScan Vectorization:
                 native: true
                 vectorizationSchemaColumns: [0:c1:decimal(15,2)/DECIMAL_64, 1:c2:decimal(15,2)/DECIMAL_64, 2:ROW__ID:struct<transactionid:bigint,bucketid:int,rowid:bigint>]
@@ -261,7 +273,7 @@ STAGE PLANS:
                   className: VectorSelectOperator
                   native: true
                   projectedOutputColumnNums: [0]
-              Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 4 Data size: 12 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
                 aggregations: sum(c1)
                 Group By Vectorization:
@@ -435,7 +447,7 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: e011_01
-            Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 4 Data size: 12 Basic stats: COMPLETE Column stats: NONE
             TableScan Vectorization:
                 native: true
                 vectorizationSchemaColumns: [0:c1:decimal(15,2)/DECIMAL_64, 1:c2:decimal(15,2)/DECIMAL_64, 2:ROW__ID:struct<transactionid:bigint,bucketid:int,rowid:bigint>]
@@ -446,7 +458,7 @@ STAGE PLANS:
                   className: VectorSelectOperator
                   native: true
                   projectedOutputColumnNums: [0, 1]
-              Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 4 Data size: 12 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
                 aggregations: sum(c1)
                 Group By Vectorization:
@@ -460,7 +472,7 @@ STAGE PLANS:
                 keys: c1 (type: decimal(15,2)), c2 (type: decimal(15,2))
                 mode: hash
                 outputColumnNames: _col0, _col1, _col2
-                Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 4 Data size: 12 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
                   key expressions: _col0 (type: decimal(15,2)), _col1 (type: decimal(15,2))
                   sort order: ++
@@ -470,7 +482,7 @@ STAGE PLANS:
                       native: false
                       nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                       nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false
-                  Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 4 Data size: 12 Basic stats: COMPLETE Column stats: NONE
                   value expressions: _col2 (type: decimal(25,2))
       Execution mode: vectorized
       Map Vectorization:
@@ -498,7 +510,7 @@ STAGE PLANS:
           keys: KEY._col0 (type: decimal(15,2)), KEY._col1 (type: decimal(15,2))
           mode: mergepartial
           outputColumnNames: _col0, _col1, _col2
-          Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
+          Statistics: Num rows: 2 Data size: 6 Basic stats: COMPLETE Column stats: NONE
           File Output Operator
             compressed: false
             table:
@@ -522,7 +534,7 @@ STAGE PLANS:
                   native: false
                   nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                   nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false
-              Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 2 Data size: 6 Basic stats: COMPLETE Column stats: NONE
               value expressions: _col2 (type: decimal(25,2))
       Execution mode: vectorized
       Map Vectorization:
@@ -548,7 +560,7 @@ STAGE PLANS:
         Select Operator
           expressions: KEY.reducesinkkey1 (type: decimal(15,2)), KEY.reducesinkkey0 (type: decimal(15,2)), VALUE._col0 (type: decimal(25,2))
           outputColumnNames: _col0, _col1, _col2
-          Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
+          Statistics: Num rows: 2 Data size: 6 Basic stats: COMPLETE Column stats: NONE
           PTF Operator
             Function definitions:
                 Input definition
@@ -568,14 +580,14 @@ STAGE PLANS:
                         name: sum
                         window function: GenericUDAFSumHiveDecimal
                         window frame: RANGE PRECEDING(MAX)~CURRENT
-            Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 2 Data size: 6 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: sum_window_0 (type: decimal(35,2))
               outputColumnNames: _col0
-              Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 2 Data size: 6 Basic stats: COMPLETE Column stats: NONE
               File Output Operator
                 compressed: false
-                Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 2 Data size: 6 Basic stats: COMPLETE Column stats: NONE
                 table:
                     input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                     output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@@ -637,19 +649,19 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: e011_01
-            Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 4 Data size: 12 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
               predicate: c1 is not null (type: boolean)
-              Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 4 Data size: 12 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: c1 (type: decimal(15,2)), c2 (type: decimal(15,2))
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 4 Data size: 12 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
                   key expressions: _col0 (type: decimal(15,2))
                   sort order: +
                   Map-reduce partition columns: _col0 (type: decimal(15,2))
-                  Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 4 Data size: 12 Basic stats: COMPLETE Column stats: NONE
                   value expressions: _col1 (type: decimal(15,2))
           TableScan
             alias: e011_03
@@ -681,13 +693,13 @@ STAGE PLANS:
             0 _col0 (type: decimal(15,2))
             1 _col0 (type: decimal(15,2))
           outputColumnNames: _col0, _col1
-          Statistics: Num rows: 4 Data size: 39 Basic stats: COMPLETE Column stats: NONE
+          Statistics: Num rows: 4 Data size: 13 Basic stats: COMPLETE Column stats: NONE
           Group By Operator
             aggregations: sum(_col0)
             keys: _col0 (type: decimal(15,2)), _col1 (type: decimal(15,2))
             mode: hash
             outputColumnNames: _col0, _col1, _col2
-            Statistics: Num rows: 4 Data size: 39 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 4 Data size: 13 Basic stats: COMPLETE Column stats: NONE
             File Output Operator
               compressed: false
               table:
@@ -711,7 +723,7 @@ STAGE PLANS:
                   native: false
                   nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                   nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false
-              Statistics: Num rows: 4 Data size: 39 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 4 Data size: 13 Basic stats: COMPLETE Column stats: NONE
               value expressions: _col2 (type: decimal(25,2))
       Execution mode: vectorized
       Map Vectorization:
@@ -739,7 +751,7 @@ STAGE PLANS:
           keys: KEY._col0 (type: decimal(15,2)), KEY._col1 (type: decimal(15,2))
           mode: mergepartial
           outputColumnNames: _col0, _col1, _col2
-          Statistics: Num rows: 2 Data size: 19 Basic stats: COMPLETE Column stats: NONE
+          Statistics: Num rows: 2 Data size: 6 Basic stats: COMPLETE Column stats: NONE
           File Output Operator
             compressed: false
             table:
@@ -763,7 +775,7 @@ STAGE PLANS:
                   native: false
                   nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                   nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false
-              Statistics: Num rows: 2 Data size: 19 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 2 Data size: 6 Basic stats: COMPLETE Column stats: NONE
               value expressions: _col2 (type: decimal(25,2))
       Execution mode: vectorized
       Map Vectorization:
@@ -789,7 +801,7 @@ STAGE PLANS:
         Select Operator
           expressions: KEY.reducesinkkey1 (type: decimal(15,2)), KEY.reducesinkkey0 (type: decimal(15,2)), VALUE._col0 (type: decimal(25,2))
           outputColumnNames: _col0, _col1, _col2
-          Statistics: Num rows: 2 Data size: 19 Basic stats: COMPLETE Column stats: NONE
+          Statistics: Num rows: 2 Data size: 6 Basic stats: COMPLETE Column stats: NONE
           PTF Operator
             Function definitions:
                 Input definition
@@ -809,14 +821,14 @@ STAGE PLANS:
                         name: sum
                         window function: GenericUDAFSumHiveDecimal
                         window frame: RANGE PRECEDING(MAX)~CURRENT
-            Statistics: Num rows: 2 Data size: 19 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 2 Data size: 6 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: sum_window_0 (type: decimal(35,2))
               outputColumnNames: _col0
-              Statistics: Num rows: 2 Data size: 19 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 2 Data size: 6 Basic stats: COMPLETE Column stats: NONE
               File Output Operator
                 compressed: false
-                Statistics: Num rows: 2 Data size: 19 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 2 Data size: 6 Basic stats: COMPLETE Column stats: NONE
                 table:
                     input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                     output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@@ -898,19 +910,19 @@ STAGE PLANS:
                   value expressions: _col1 (type: decimal(15,2))
           TableScan
             alias: e011_01
-            Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 4 Data size: 12 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
               predicate: c1 is not null (type: boolean)
-              Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 4 Data size: 12 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: c1 (type: decimal(15,2))
                 outputColumnNames: _col0
-                Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 4 Data size: 12 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
                   key expressions: _col0 (type: decimal(15,2))
                   sort order: +
                   Map-reduce partition columns: _col0 (type: decimal(15,2))
-                  Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 4 Data size: 12 Basic stats: COMPLETE Column stats: NONE
       Map Vectorization:
           enabled: false
           enabledConditionsNotMet: Vectorized map work only works with 1 TableScanOperator IS false
@@ -1126,19 +1138,19 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: e011_01
-            Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 4 Data size: 12 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
               predicate: c1 is not null (type: boolean)
-              Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 4 Data size: 12 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: c1 (type: decimal(15,2)), c2 (type: decimal(15,2))
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 4 Data size: 12 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
                   key expressions: _col0 (type: decimal(15,2))
                   sort order: +
                   Map-reduce partition columns: _col0 (type: decimal(15,2))
-                  Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 4 Data size: 12 Basic stats: COMPLETE Column stats: NONE
                   value expressions: _col1 (type: decimal(15,2))
           TableScan
             alias: e011_03
@@ -1171,13 +1183,13 @@ STAGE PLANS:
             0 _col0 (type: decimal(15,2))
             1 _col0 (type: decimal(15,2))
           outputColumnNames: _col0, _col1, _col2, _col3
-          Statistics: Num rows: 4 Data size: 39 Basic stats: COMPLETE Column stats: NONE
+          Statistics: Num rows: 4 Data size: 13 Basic stats: COMPLETE Column stats: NONE
           Group By Operator
             aggregations: corr(_col0, _col2)
             keys: _col1 (type: decimal(15,2)), _col3 (type: decimal(15,2))
             mode: hash
             outputColumnNames: _col0, _col1, _col2
-            Statistics: Num rows: 4 Data size: 39 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 4 Data size: 13 Basic stats: COMPLETE Column stats: NONE
             File Output Operator
               compressed: false
               table:
@@ -1201,7 +1213,7 @@ STAGE PLANS:
                   native: false
                   nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                   nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false
-              Statistics: Num rows: 4 Data size: 39 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 4 Data size: 13 Basic stats: COMPLETE Column stats: NONE
               value expressions: _col2 (type: struct<count:bigint,xavg:double,yavg:double,xvar:double,yvar:double,covar:double>)
       Execution mode: vectorized
       Map Vectorization:
@@ -1229,11 +1241,11 @@ STAGE PLANS:
           keys: KEY._col0 (type: decimal(15,2)), KEY._col1 (type: decimal(15,2))
           mode: mergepartial
           outputColumnNames: _col0, _col1, _col2
-          Statistics: Num rows: 2 Data size: 19 Basic stats: COMPLETE Column stats: NONE
+          Statistics: Num rows: 2 Data size: 6 Basic stats: COMPLETE Column stats: NONE
           Select Operator
             expressions: _col1 (type: decimal(15,2)), _col0 (type: decimal(15,2)), _col2 (type: double)
             outputColumnNames: _col0, _col1, _col2
-            Statistics: Num rows: 2 Data size: 19 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 2 Data size: 6 Basic stats: COMPLETE Column stats: NONE
             PTF Operator
               Function definitions:
                   Input definition
@@ -1253,14 +1265,14 @@ STAGE PLANS:
                           name: sum
                           window function: GenericUDAFSumDouble
                           window frame: RANGE PRECEDING(MAX)~CURRENT
-              Statistics: Num rows: 2 Data size: 19 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 2 Data size: 6 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: sum_window_0 (type: double)
                 outputColumnNames: _col0
-                Statistics: Num rows: 2 Data size: 19 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 2 Data size: 6 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
-                  Statistics: Num rows: 2 Data size: 19 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 2 Data size: 6 Basic stats: COMPLETE Column stats: NONE
                   table:
                       input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                       output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@@ -1317,7 +1329,7 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: e011_01_small
-            Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 4 Data size: 12 Basic stats: COMPLETE Column stats: NONE
             TableScan Vectorization:
                 native: true
                 vectorizationSchemaColumns: [0:c1:decimal(7,2)/DECIMAL_64, 1:c2:decimal(7,2)/DECIMAL_64, 2:ROW__ID:struct<transactionid:bigint,bucketid:int,rowid:bigint>]
@@ -1328,7 +1340,7 @@ STAGE PLANS:
                   className: VectorSelectOperator
                   native: true
                   projectedOutputColumnNums: [0]
-              Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 4 Data size: 12 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
                 aggregations: sum(c1)
                 Group By Vectorization:
@@ -1502,7 +1514,7 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: e011_01_small
-            Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 4 Data size: 12 Basic stats: COMPLETE Column stats: NONE
             TableScan Vectorization:
                 native: true
                 vectorizationSchemaColumns: [0:c1:decimal(7,2)/DECIMAL_64, 1:c2:decimal(7,2)/DECIMAL_64, 2:ROW__ID:struct<transactionid:bigint,bucketid:int,rowid:bigint>]
@@ -1513,7 +1525,7 @@ STAGE PLANS:
                   className: VectorSelectOperator
                   native: true
                   projectedOutputColumnNums: [0, 1]
-              Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 4 Data size: 12 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
                 aggregations: sum(c1)
                 Group By Vectorization:
@@ -1527,7 +1539,7 @@ STAGE PLANS:
                 keys: c1 (type: decimal(7,2)), c2 (type: decimal(7,2))
                 mode: hash
                 outputColumnNames: _col0, _col1, _col2
-                Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 4 Data size: 12 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
                   key expressions: _col0 (type: decimal(7,2)), _col1 (type: decimal(7,2))
                   sort order: ++
@@ -1537,7 +1549,7 @@ STAGE PLANS:
                       native: false
                       nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                       nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false
-                  Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 4 Data size: 12 Basic stats: COMPLETE Column stats: NONE
                   value expressions: _col2 (type: decimal(17,2))
       Execution mode: vectorized
       Map Vectorization:
@@ -1565,7 +1577,7 @@ STAGE PLANS:
           keys: KEY._col0 (type: decimal(7,2)), KEY._col1 (type: decimal(7,2))
           mode: mergepartial
           outputColumnNames: _col0, _col1, _col2
-          Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
+          Statistics: Num rows: 2 Data size: 6 Basic stats: COMPLETE Column stats: NONE
           File Output Operator
             compressed: false
             table:
@@ -1589,7 +1601,7 @@ STAGE PLANS:
                   native: false
                   nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                   nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false
-              Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 2 Data size: 6 Basic stats: COMPLETE Column stats: NONE
               value expressions: _col2 (type: decimal(17,2))
       Execution mode: vectorized
       Map Vectorization:
@@ -1615,7 +1627,7 @@ STAGE PLANS:
         Select Operator
           expressions: KEY.reducesinkkey1 (type: decimal(7,2)), KEY.reducesinkkey0 (type: decimal(7,2)), VALUE._col0 (type: decimal(17,2))
           outputColumnNames: _col0, _col1, _col2
-          Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
+          Statistics: Num rows: 2 Data size: 6 Basic stats: COMPLETE Column stats: NONE
           PTF Operator
             Function definitions:
                 Input definition
@@ -1635,14 +1647,14 @@ STAGE PLANS:
                         name: sum
                         window function: GenericUDAFSumHiveDecimal
                         window frame: RANGE PRECEDING(MAX)~CURRENT
-            Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 2 Data size: 6 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: sum_window_0 (type: decimal(27,2))
               outputColumnNames: _col0
-              Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 2 Data size: 6 Basic stats: COMPLETE Column stats: NONE
               File Output Operator
                 compressed: false
-                Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 2 Data size: 6 Basic stats: COMPLETE Column stats: NONE
                 table:
                     input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                     output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@@ -1704,19 +1716,19 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: e011_01_small
-            Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 4 Data size: 12 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
               predicate: c1 is not null (type: boolean)
-              Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 4 Data size: 12 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: c1 (type: decimal(7,2)), c2 (type: decimal(7,2))
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 4 Data size: 12 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
                   key expressions: _col0 (type: decimal(7,2))
                   sort order: +
                   Map-reduce partition columns: _col0 (type: decimal(7,2))
-                  Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 4 Data size: 12 Basic stats: COMPLETE Column stats: NONE
                   value expressions: _col1 (type: decimal(7,2))
           TableScan
             alias: e011_03_small
@@ -1748,13 +1760,13 @@ STAGE PLANS:
             0 _col0 (type: decimal(7,2))
             1 _col0 (type: decimal(7,2))
           outputColumnNames: _col0, _col1
-          Statistics: Num rows: 4 Data size: 39 Basic stats: COMPLETE Column stats: NONE
+          Statistics: Num rows: 4 Data size: 13 Basic stats: COMPLETE Column stats: NONE
           Group By Operator
             aggregations: sum(_col0)
             keys: _col0 (type: decimal(7,2)), _col1 (type: decimal(7,2))
             mode: hash
             outputColumnNames: _col0, _col1, _col2
-            Statistics: Num rows: 4 Data size: 39 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 4 Data size: 13 Basic stats: COMPLETE Column stats: NONE
             File Output Operator
               compressed: false
               table:
@@ -1778,7 +1790,7 @@ STAGE PLANS:
                   native: false
                   nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                   nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false
-              Statistics: Num rows: 4 Data size: 39 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 4 Data size: 13 Basic stats: COMPLETE Column stats: NONE
               value expressions: _col2 (type: decimal(17,2))
       Execution mode: vectorized
       Map Vectorization:
@@ -1806,7 +1818,7 @@ STAGE PLANS:
           keys: KEY._col0 (type: decimal(7,2)), KEY._col1 (type: decimal(7,2))
           mode: mergepartial
           outputColumnNames: _col0, _col1, _col2
-          Statistics: Num rows: 2 Data size: 19 Basic stats: COMPLETE Column stats: NONE
+          Statistics: Num rows: 2 Data size: 6 Basic stats: COMPLETE Column stats: NONE
           File Output Operator
             compressed: false
             table:
@@ -1830,7 +1842,7 @@ STAGE PLANS:
                   native: false
                   nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                   nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false
-              Statistics: Num rows: 2 Data size: 19 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 2 Data size: 6 Basic stats: COMPLETE Column stats: NONE
               value expressions: _col2 (type: decimal(17,2))
       Execution mode: vectorized
       Map Vectorization:
@@ -1856,7 +1868,7 @@ STAGE PLANS:
         Select Operator
           expressions: KEY.reducesinkkey1 (type: decimal(7,2)), KEY.reducesinkkey0 (type: decimal(7,2)), VALUE._col0 (type: decimal(17,2))
           outputColumnNames: _col0, _col1, _col2
-          Statistics: Num rows: 2 Data size: 19 Basic stats: COMPLETE Column stats: NONE
+          Statistics: Num rows: 2 Data size: 6 Basic stats: COMPLETE Column stats: NONE
           PTF Operator
             Function definitions:
                 Input definition
@@ -1876,14 +1888,14 @@ STAGE PLANS:
                         name: sum
                         window function: GenericUDAFSumHiveDecimal
                         window frame: RANGE PRECEDING(MAX)~CURRENT
-            Statistics: Num rows: 2 Data size: 19 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 2 Data size: 6 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: sum_window_0 (type: decimal(27,2))
               outputColumnNames: _col0
-              Statistics: Num rows: 2 Data size: 19 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 2 Data size: 6 Basic stats: COMPLETE Column stats: NONE
               File Output Operator
                 compressed: false
-                Statistics: Num rows: 2 Data size: 19 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 2 Data size: 6 Basic stats: COMPLETE Column stats: NONE
                 table:
                     input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                     output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@@ -1965,19 +1977,19 @@ STAGE PLANS:
                   value expressions: _col1 (type: decimal(7,2))
           TableScan
             alias: e011_01_small
-            Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 4 Data size: 12 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
               predicate: c1 is not null (type: boolean)
-              Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 4 Data size: 12 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: c1 (type: decimal(7,2))
                 outputColumnNames: _col0
-                Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 4 Data size: 12 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
                   key expressions: _col0 (type: decimal(7,2))
                   sort order: +
                   Map-reduce partition columns: _col0 (type: decimal(7,2))
-                  Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 4 Data size: 12 Basic stats: COMPLETE Column stats: NONE
       Map Vectorization:
           enabled: false
           enabledConditionsNotMet: Vectorized map work only works with 1 TableScanOperator IS false
@@ -2193,19 +2205,19 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: e011_01_small
-            Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 4 Data size: 12 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
               predicate: c1 is not null (type: boolean)
-              Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 4 Data size: 12 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: c1 (type: decimal(7,2)), c2 (type: decimal(7,2))
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 4 Data size: 12 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
                   key expressions: _col0 (type: decimal(7,2))
                   sort order: +
                   Map-reduce partition columns: _col0 (type: decimal(7,2))
-                  Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 4 Data size: 12 Basic stats: COMPLETE Column stats: NONE
                   value expressions: _col1 (type: decimal(7,2))
           TableScan
             alias: e011_03_small
@@ -2238,13 +2250,13 @@ STAGE PLANS:
             0 _col0 (type: decimal(7,2))
             1 _col0 (type: decimal(7,2))
           outputColumnNames: _col0, _col1, _col2, _col3
-          Statistics: Num rows: 4 Data size: 39 Basic stats: COMPLETE Column stats: NONE
+          Statistics: Num rows: 4 Data size: 13 Basic stats: COMPLETE Column stats: NONE
           Group By Operator
             aggregations: corr(_col0, _col2)
             keys: _col1 (type: decimal(7,2)), _col3 (type: decimal(7,2))
             mode: hash
             outputColumnNames: _col0, _col1, _col2
-            Statistics: Num rows: 4 Data size: 39 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 4 Data size: 13 Basic stats: COMPLETE Column stats: NONE
             File Output Operator
               compressed: false
               table:
@@ -2268,7 +2280,7 @@ STAGE PLANS:
                   native: false
                   nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                   nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false
-              Statistics: Num rows: 4 Data size: 39 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 4 Data size: 13 Basic stats: COMPLETE Column stats: NONE
               value expressions: _col2 (type: struct<count:bigint,xavg:double,yavg:double,xvar:double,yvar:double,covar:double>)
       Execution mode: vectorized
       Map Vectorization:
@@ -2296,11 +2308,11 @@ STAGE PLANS:
           keys: KEY._col0 (type: decimal(7,2)), KEY._col1 (type: decimal(7,2))
           mode: mergepartial
           outputColumnNames: _col0, _col1, _col2
-          Statistics: Num rows: 2 Data size: 19 Basic stats: COMPLETE Column stats: NONE
+          Statistics: Num rows: 2 Data size: 6 Basic stats: COMPLETE Column stats: NONE
           Select Operator
             expressions: _col1 (type: decimal(7,2)), _col0 (type: decimal(7,2)), _col2 (type: double)
             outputColumnNames: _col0, _col1, _col2
-            Statistics: Num rows: 2 Data size: 19 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 2 Data size: 6 Basic stats: COMPLETE Column stats: NONE
             PTF Operator
               Function definitions:
                   Input definition
@@ -2320,14 +2332,14 @@ STAGE PLANS:
                           name: sum
                           window function: GenericUDAFSumDouble
                           window frame: RANGE PRECEDING(MAX)~CURRENT
-              Statistics: Num rows: 2 Data size: 19 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 2 Data size: 6 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: sum_window_0 (type: double)
                 outputColumnNames: _col0
-                Statistics: Num rows: 2 Data size: 19 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 2 Data size: 6 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
-                  Statistics: Num rows: 2 Data size: 19 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 2 Data size: 6 Basic stats: COMPLETE Column stats: NONE
                   table:
                       input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                       output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/vector_tablesample_rows.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_tablesample_rows.q.out b/ql/src/test/results/clientpositive/vector_tablesample_rows.q.out
index ad1d0ff..d7c34ac 100644
--- a/ql/src/test/results/clientpositive/vector_tablesample_rows.q.out
+++ b/ql/src/test/results/clientpositive/vector_tablesample_rows.q.out
@@ -175,7 +175,8 @@ STAGE PLANS:
               name: default.decimal_2
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-3
     Merge File Operator
@@ -371,7 +372,8 @@ STAGE PLANS:
           isTemporary: true
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-3
     Map Reduce

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/vector_udf_character_length.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_udf_character_length.q.out b/ql/src/test/results/clientpositive/vector_udf_character_length.q.out
index 81d801c..2960cd6 100644
--- a/ql/src/test/results/clientpositive/vector_udf_character_length.q.out
+++ b/ql/src/test/results/clientpositive/vector_udf_character_length.q.out
@@ -93,7 +93,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-3
     Map Reduce
@@ -251,8 +252,8 @@ POSTHOOK: query: SELECT character_length(dest2.name) FROM dest2
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@dest2
 #### A masked pattern was here ####
-NULL
 2
+NULL
 PREHOOK: query: EXPLAIN SELECT char_length(dest2.name) FROM dest2
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN SELECT char_length(dest2.name) FROM dest2
@@ -295,8 +296,8 @@ POSTHOOK: query: SELECT char_length(dest2.name) FROM dest2
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@dest2
 #### A masked pattern was here ####
-NULL
 2
+NULL
 PREHOOK: query: DROP TABLE dest1
 PREHOOK: type: DROPTABLE
 PREHOOK: Input: default@dest1

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/vector_udf_octet_length.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_udf_octet_length.q.out b/ql/src/test/results/clientpositive/vector_udf_octet_length.q.out
index c71cfef..46f5d65 100644
--- a/ql/src/test/results/clientpositive/vector_udf_octet_length.q.out
+++ b/ql/src/test/results/clientpositive/vector_udf_octet_length.q.out
@@ -76,7 +76,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-3
     Map Reduce
@@ -234,8 +235,8 @@ POSTHOOK: query: SELECT octet_length(dest2.name) FROM dest2
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@dest2
 #### A masked pattern was here ####
-NULL
 6
+NULL
 PREHOOK: query: DROP TABLE dest1
 PREHOOK: type: DROPTABLE
 PREHOOK: Input: default@dest1

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/vector_varchar_4.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_varchar_4.q.out b/ql/src/test/results/clientpositive/vector_varchar_4.q.out
index ad6e739..59ee40a 100644
--- a/ql/src/test/results/clientpositive/vector_varchar_4.q.out
+++ b/ql/src/test/results/clientpositive/vector_varchar_4.q.out
@@ -201,7 +201,8 @@ STAGE PLANS:
               name: default.varchar_lazy_binary_columnar
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-3
     Merge File Operator

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/vector_varchar_simple.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_varchar_simple.q.out b/ql/src/test/results/clientpositive/vector_varchar_simple.q.out
index 26599cb..0a144d2 100644
--- a/ql/src/test/results/clientpositive/vector_varchar_simple.q.out
+++ b/ql/src/test/results/clientpositive/vector_varchar_simple.q.out
@@ -358,7 +358,8 @@ STAGE PLANS:
               name: default.varchar_3
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: insert into table varchar_3 select cint from alltypesorc limit 10
 PREHOOK: type: QUERY

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/vector_windowing.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_windowing.q.out b/ql/src/test/results/clientpositive/vector_windowing.q.out
index def616e..d12b1c1 100644
--- a/ql/src/test/results/clientpositive/vector_windowing.q.out
+++ b/ql/src/test/results/clientpositive/vector_windowing.q.out
@@ -5182,7 +5182,8 @@ STAGE PLANS:
               name: default.part_1
 
   Stage: Stage-4
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-5
     Map Reduce
@@ -5415,7 +5416,8 @@ STAGE PLANS:
               name: default.part_2
 
   Stage: Stage-8
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-9
     Map Reduce
@@ -5564,7 +5566,8 @@ STAGE PLANS:
               name: default.part_3
 
   Stage: Stage-11
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: from part 
 INSERT OVERWRITE TABLE part_1 

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/vector_windowing_expressions.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_windowing_expressions.q.out b/ql/src/test/results/clientpositive/vector_windowing_expressions.q.out
index f7c58c9..7a26191 100644
--- a/ql/src/test/results/clientpositive/vector_windowing_expressions.q.out
+++ b/ql/src/test/results/clientpositive/vector_windowing_expressions.q.out
@@ -1644,7 +1644,8 @@ STAGE PLANS:
               name: default.t1
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-1
     Move Operator
@@ -1657,7 +1658,8 @@ STAGE PLANS:
               name: default.t2
 
   Stage: Stage-4
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: from (select sum(i) over (partition by ts order by i), s from over10k) tt insert overwrite table t1 select * insert overwrite table t2 select *
 PREHOOK: type: QUERY

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/vector_windowing_streaming.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_windowing_streaming.q.out b/ql/src/test/results/clientpositive/vector_windowing_streaming.q.out
index e142f40..9c07d4e 100644
--- a/ql/src/test/results/clientpositive/vector_windowing_streaming.q.out
+++ b/ql/src/test/results/clientpositive/vector_windowing_streaming.q.out
@@ -775,7 +775,8 @@ STAGE PLANS:
           name: default.sD
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: create table sD ROW FORMAT DELIMITED FIELDS TERMINATED BY ','  STORED AS TEXTFILE as  
 select * from (select ctinyint, cdouble, rank() over(partition by ctinyint order by cdouble) r from  alltypesorc) a where r < 5

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/vectorized_mapjoin2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vectorized_mapjoin2.q.out b/ql/src/test/results/clientpositive/vectorized_mapjoin2.q.out
index 42b6328..6d81046 100644
--- a/ql/src/test/results/clientpositive/vectorized_mapjoin2.q.out
+++ b/ql/src/test/results/clientpositive/vectorized_mapjoin2.q.out
@@ -54,14 +54,14 @@ STAGE PLANS:
         $hdt$_0:x 
           TableScan
             alias: x
-            Statistics: Num rows: 1 Data size: 181 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
               predicate: a is not null (type: boolean)
-              Statistics: Num rows: 1 Data size: 181 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: a (type: int)
                 outputColumnNames: _col0
-                Statistics: Num rows: 1 Data size: 181 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE
                 HashTable Sink Operator
                   keys:
                     0 _col0 (type: int)
@@ -72,7 +72,7 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: y
-            Statistics: Num rows: 1 Data size: 181 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE
             TableScan Vectorization:
                 native: true
             Filter Operator
@@ -81,7 +81,7 @@ STAGE PLANS:
                   native: true
                   predicateExpression: SelectColumnIsNotNull(col 0:int)
               predicate: b is not null (type: boolean)
-              Statistics: Num rows: 1 Data size: 181 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: b (type: int)
                 outputColumnNames: _col0
@@ -89,7 +89,7 @@ STAGE PLANS:
                     className: VectorSelectOperator
                     native: true
                     projectedOutputColumnNums: [0]
-                Statistics: Num rows: 1 Data size: 181 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE
                 Map Join Operator
                   condition map:
                        Inner Join 0 to 1
@@ -101,7 +101,7 @@ STAGE PLANS:
                       native: false
                       nativeConditionsMet: hive.mapjoin.optimized.hashtable IS true, hive.vectorized.execution.mapjoin.native.enabled IS true, One MapJoin Condition IS true, No nullsafe IS true, Small table vectorizes IS true, Fast Hash Table and No Hybrid Hash Join IS true
                       nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false
-                  Statistics: Num rows: 1 Data size: 199 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE
                   Group By Operator
                     aggregations: count()
                     Group By Vectorization:

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/standalone-metastore/src/main/java/org/apache/hadoop/hive/common/StatsSetupConst.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/common/StatsSetupConst.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/common/StatsSetupConst.java
index e2e3ada..5919089 100644
--- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/common/StatsSetupConst.java
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/common/StatsSetupConst.java
@@ -258,6 +258,14 @@ public class StatsSetupConst {
     }
   }
 
+  public static boolean canColumnStatsMerge(Map<String, String> params, String colName) {
+    if (params == null) {
+      return false;
+    }
+    ColumnStatsAccurate stats = parseStatsAcc(params.get(COLUMN_STATS_ACCURATE));
+    return stats.columnStats.containsKey(colName);
+  }
+  
   public static void clearColumnStatsState(Map<String, String> params) {
     if (params == null) {
       return;
@@ -295,7 +303,9 @@ public class StatsSetupConst {
       }
     }
     setBasicStatsState(params, setting);
-    setColumnStatsState(params, cols);
+    if (TRUE.equals(setting)) {
+      setColumnStatsState(params, cols);
+    }
   }
   
   private static ColumnStatsAccurate parseStatsAcc(String statsAcc) {
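
Note: the new canColumnStatsMerge helper reads the COLUMN_STATS_ACCURATE parameter (a JSON blob kept in the table/partition parameter map) and reports whether accurate statistics are recorded for the given column, which is the precondition for merging old and new column stats rather than overwriting them. A minimal caller sketch; decideWrite and the println placeholders are illustrative only, not Hive APIs:

  import java.util.Map;
  import org.apache.hadoop.hive.common.StatsSetupConst;

  public class StatsMergeSketch {
    // Hypothetical caller showing the intended use of the new helper.
    static void decideWrite(Map<String, String> params, String colName) {
      if (StatsSetupConst.canColumnStatsMerge(params, colName)) {
        // COLUMN_STATS_ACCURATE marks this column accurate: merging is safe.
        System.out.println("merge stats for " + colName);
      } else {
        // No accurate prior stats recorded: write the new values outright.
        System.out.println("overwrite stats for " + colName);
      }
    }
  }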

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java
index ccadac1..921cfc0 100644
--- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java
@@ -252,7 +252,7 @@ public class HiveAlterHandler implements AlterHandler {
             part.setDbName(newDbName);
             part.setTableName(newTblName);
             ColumnStatistics colStats = updateOrGetPartitionColumnStats(msdb, dbname, name,
-                part.getValues(), part.getSd().getCols(), oldt, part);
+                part.getValues(), part.getSd().getCols(), oldt, part, null);
             if (colStats != null) {
               columnStatsNeedUpdated.put(part, colStats);
             }
@@ -291,7 +291,7 @@ public class HiveAlterHandler implements AlterHandler {
               List<FieldSchema> oldCols = part.getSd().getCols();
               part.getSd().setCols(newt.getSd().getCols());
               ColumnStatistics colStats = updateOrGetPartitionColumnStats(msdb, dbname, name,
-                  part.getValues(), oldCols, oldt, part);
+                  part.getValues(), oldCols, oldt, part, null);
               assert(colStats == null);
               msdb.alterPartition(dbname, name, part.getValues(), part);
             }
@@ -434,7 +434,7 @@ public class HiveAlterHandler implements AlterHandler {
         // PartitionView does not have SD. We do not need update its column stats
         if (oldPart.getSd() != null) {
           updateOrGetPartitionColumnStats(msdb, dbname, name, new_part.getValues(),
-              oldPart.getSd().getCols(), tbl, new_part);
+              oldPart.getSd().getCols(), tbl, new_part, null);
         }
         msdb.alterPartition(dbname, name, new_part.getValues(), new_part);
         if (transactionalListeners != null && !transactionalListeners.isEmpty()) {
@@ -561,7 +561,7 @@ public class HiveAlterHandler implements AlterHandler {
 
       String newPartName = Warehouse.makePartName(tbl.getPartitionKeys(), new_part.getValues());
       ColumnStatistics cs = updateOrGetPartitionColumnStats(msdb, dbname, name, oldPart.getValues(),
-          oldPart.getSd().getCols(), tbl, new_part);
+          oldPart.getSd().getCols(), tbl, new_part, null);
       msdb.alterPartition(dbname, name, part_vals, new_part);
       if (cs != null) {
         cs.getStatsDesc().setPartName(newPartName);
@@ -659,7 +659,7 @@ public class HiveAlterHandler implements AlterHandler {
         // PartitionView does not have SD and we do not need to update its column stats
         if (oldTmpPart.getSd() != null) {
           updateOrGetPartitionColumnStats(msdb, dbname, name, oldTmpPart.getValues(),
-              oldTmpPart.getSd().getCols(), tbl, tmpPart);
+              oldTmpPart.getSd().getCols(), tbl, tmpPart, null);
         }
       }
 
@@ -811,12 +811,14 @@ public class HiveAlterHandler implements AlterHandler {
 
   private ColumnStatistics updateOrGetPartitionColumnStats(
       RawStore msdb, String dbname, String tblname, List<String> partVals,
-      List<FieldSchema> oldCols, Table table, Partition part)
+      List<FieldSchema> oldCols, Table table, Partition part, List<FieldSchema> newCols)
           throws MetaException, InvalidObjectException {
     ColumnStatistics newPartsColStats = null;
     try {
-      List<FieldSchema> newCols = part.getSd() == null ?
-          new ArrayList<>() : part.getSd().getCols();
+      // If newCols is not specified, fall back to the partition's current columns.
+      if (newCols == null) {
+        newCols = part.getSd() == null ? new ArrayList<>() : part.getSd().getCols();
+      }
       String oldPartName = Warehouse.makePartName(table.getPartitionKeys(), partVals);
       String newPartName = Warehouse.makePartName(table.getPartitionKeys(), part.getValues());
       boolean rename = !part.getDbName().equals(dbname) || !part.getTableName().equals(tblname)
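
For orientation (an editor's note, not part of the commit): every call site changed above passes null for the new newCols argument, so behavior there is unchanged; only a caller that already knows the post-alter column list would pass it explicitly. A minimal, self-contained Java sketch of the "null selects the default" convention, with plain strings standing in for FieldSchema:

    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.List;

    public class NullMeansDefaultSketch {
      // Stand-in for: part.getSd() == null ? new ArrayList<>() : part.getSd().getCols()
      static List<String> resolveNewCols(List<String> newCols, List<String> sdCols) {
        if (newCols == null) {
          newCols = (sdCols == null) ? new ArrayList<>() : sdCols;
        }
        return newCols;
      }

      public static void main(String[] args) {
        List<String> sdCols = Arrays.asList("key", "value");
        System.out.println(resolveNewCols(null, sdCols));                  // [key, value]
        System.out.println(resolveNewCols(null, null));                    // []
        System.out.println(resolveNewCols(Arrays.asList("key2"), sdCols)); // [key2]
      }
    }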

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java
index 7334a0c..63081e7 100644
--- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java
@@ -7508,7 +7508,7 @@ public class ObjectStore implements RawStore, Configurable {
         MTableColumnStatistics mStatsObj = StatObjectConverter.convertToMTableColumnStatistics(
             ensureGetMTable(statsDesc.getDbName(), statsDesc.getTableName()), statsDesc, statsObj);
         writeMTableColumnStatistics(table, mStatsObj, oldStats.get(statsObj.getColName()));
-        colNames.add(statsObj.getColName());
+        // Do not add the column name again; doing so would produce duplicate entries in colNames.
       }
 
       // Set the table properties
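
A toy illustration of why the removed line produced duplicates, assuming (as the replacement comment implies) that colNames is already populated before the write loop; the class and setup here are invented for the demo:

    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.List;

    public class DuplicateColNamesSketch {
      public static void main(String[] args) {
        List<String> statsCols = Arrays.asList("key", "value");
        List<String> colNames = new ArrayList<>(statsCols); // already collected
        for (String col : statsCols) {
          // writeMTableColumnStatistics(...) would run here.
          colNames.add(col); // the removed line: ends up [key, value, key, value]
        }
        System.out.println(colNames);
      }
    }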

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/TransactionalValidationListener.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/TransactionalValidationListener.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/TransactionalValidationListener.java
index f7962a4..25caf29 100644
--- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/TransactionalValidationListener.java
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/TransactionalValidationListener.java
@@ -118,6 +118,11 @@ public final class TransactionalValidationListener extends MetaStorePreEventList
       }
     }
 
+    if (transactionalValuePresent && "false".equalsIgnoreCase(transactionalValue)) {
+      transactionalValuePresent = false;
+      transactionalValue = null;
+    }
+
     if (transactionalValuePresent) {
       //normalize prop name
       parameters.put(hive_metastoreConstants.TABLE_IS_TRANSACTIONAL, transactionalValue);
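
The new guard folds an explicit 'transactional'='false' into "property not present", so the normalization below it is skipped and the table is treated as plain non-transactional. A condensed, standalone sketch of that decision (not the listener class itself):

    public class TransactionalFlagSketch {
      // Mirrors the added guard: an explicit "false" behaves like an unset property.
      static boolean isDeclaredTransactional(String transactionalValue) {
        boolean present = (transactionalValue != null);
        if (present && "false".equalsIgnoreCase(transactionalValue)) {
          present = false;
          transactionalValue = null; // nulled to mirror the patch
        }
        return present;
      }

      public static void main(String[] args) {
        System.out.println(isDeclaredTransactional("true"));  // true
        System.out.println(isDeclaredTransactional("FALSE")); // false, same as unset
        System.out.println(isDeclaredTransactional(null));    // false
      }
    }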

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/columnstats/merge/ColumnStatsMergerFactory.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/columnstats/merge/ColumnStatsMergerFactory.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/columnstats/merge/ColumnStatsMergerFactory.java
index 1a2d38e..64d07c7 100644
--- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/columnstats/merge/ColumnStatsMergerFactory.java
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/columnstats/merge/ColumnStatsMergerFactory.java
@@ -70,7 +70,7 @@ public class ColumnStatsMergerFactory {
       break;
     }
     default:
-      throw new IllegalArgumentException("Unknown stats type " + typeNew.toString());
+      throw new IllegalArgumentException("Unknown stats type " + statsObjNew.getStatsData().getSetField());
     }
     return agg;
   }
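
The one-line fix makes the exception report the union field actually set on the incoming stats object instead of the typeNew local. A schematic of the effect; SetField is a made-up enum standing in for the Thrift discriminator that getStatsData().getSetField() returns:

    public class UnknownStatsTypeSketch {
      enum SetField { LONG_STATS, DOUBLE_STATS, STRING_STATS }

      static void merge(SetField fieldOnNewObject) {
        switch (fieldOnNewObject) {
        case LONG_STATS:
          // merge long column stats...
          break;
        default:
          // Name the discriminator taken from the object itself, so the
          // message always matches the data that failed to merge.
          throw new IllegalArgumentException("Unknown stats type " + fieldOnNewObject);
        }
      }

      public static void main(String[] args) {
        merge(SetField.LONG_STATS);
        merge(SetField.DOUBLE_STATS); // throws: Unknown stats type DOUBLE_STATS
      }
    }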


http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/llap/tez_nway_join.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/tez_nway_join.q.out b/ql/src/test/results/clientpositive/llap/tez_nway_join.q.out
index 74643cb..5e977c8 100644
--- a/ql/src/test/results/clientpositive/llap/tez_nway_join.q.out
+++ b/ql/src/test/results/clientpositive/llap/tez_nway_join.q.out
@@ -49,14 +49,14 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: a
-                  Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
                     predicate: key is not null (type: boolean)
-                    Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: int)
                       outputColumnNames: _col0
-                      Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE
                       Map Join Operator
                         condition map:
                              Inner Join 0 to 1
@@ -68,7 +68,7 @@ STAGE PLANS:
                         input vertices:
                           1 Map 3
                           2 Map 4
-                        Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 6 Data size: 26 Basic stats: COMPLETE Column stats: NONE
                         Group By Operator
                           aggregations: count()
                           mode: hash
@@ -84,38 +84,38 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: b
-                  Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
                     predicate: key is not null (type: boolean)
-                    Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: int)
                       outputColumnNames: _col0
-                      Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col0 (type: int)
                         sort order: +
                         Map-reduce partition columns: _col0 (type: int)
-                        Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: NONE
             Execution mode: llap
             LLAP IO: all inputs
         Map 4 
             Map Operator Tree:
                 TableScan
                   alias: c
-                  Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
                     predicate: key is not null (type: boolean)
-                    Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: int)
                       outputColumnNames: _col0
-                      Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col0 (type: int)
                         sort order: +
                         Map-reduce partition columns: _col0 (type: int)
-                        Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: NONE
             Execution mode: llap
             LLAP IO: all inputs
         Reducer 2 
@@ -161,14 +161,14 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: a
-                  Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
                     predicate: key is not null (type: boolean)
-                    Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: int)
                       outputColumnNames: _col0
-                      Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE
                       Map Join Operator
                         condition map:
                              Inner Join 0 to 1
@@ -178,7 +178,7 @@ STAGE PLANS:
                         outputColumnNames: _col0
                         input vertices:
                           1 Map 3
-                        Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 3 Data size: 13 Basic stats: COMPLETE Column stats: NONE
                         Map Join Operator
                           condition map:
                                Inner Join 0 to 1
@@ -187,7 +187,7 @@ STAGE PLANS:
                             1 _col0 (type: int)
                           input vertices:
                             1 Map 4
-                          Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE
+                          Statistics: Num rows: 3 Data size: 14 Basic stats: COMPLETE Column stats: NONE
                           Group By Operator
                             aggregations: count()
                             mode: hash
@@ -203,38 +203,38 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: b
-                  Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
                     predicate: key is not null (type: boolean)
-                    Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: int)
                       outputColumnNames: _col0
-                      Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col0 (type: int)
                         sort order: +
                         Map-reduce partition columns: _col0 (type: int)
-                        Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: NONE
             Execution mode: llap
             LLAP IO: all inputs
         Map 4 
             Map Operator Tree:
                 TableScan
                   alias: c
-                  Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
                     predicate: key is not null (type: boolean)
-                    Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: int)
                       outputColumnNames: _col0
-                      Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col0 (type: int)
                         sort order: +
                         Map-reduce partition columns: _col0 (type: int)
-                        Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: NONE
             Execution mode: llap
             LLAP IO: all inputs
         Reducer 2 
@@ -302,48 +302,48 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: a
-                  Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: key (type: int)
                     outputColumnNames: _col0
-                    Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
                       key expressions: _col0 (type: int)
                       sort order: +
                       Map-reduce partition columns: _col0 (type: int)
-                      Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE
             Execution mode: llap
             LLAP IO: all inputs
         Map 4 
             Map Operator Tree:
                 TableScan
                   alias: b
-                  Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: key (type: int)
                     outputColumnNames: _col0
-                    Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
                       key expressions: _col0 (type: int)
                       sort order: +
                       Map-reduce partition columns: _col0 (type: int)
-                      Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: NONE
             Execution mode: llap
             LLAP IO: all inputs
         Map 5 
             Map Operator Tree:
                 TableScan
                   alias: c
-                  Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: key (type: int)
                     outputColumnNames: _col0
-                    Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
                       key expressions: _col0 (type: int)
                       sort order: +
                       Map-reduce partition columns: _col0 (type: int)
-                      Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: NONE
             Execution mode: llap
             LLAP IO: all inputs
         Reducer 2 
@@ -357,7 +357,7 @@ STAGE PLANS:
                   0 _col0 (type: int)
                   1 _col0 (type: int)
                   2 _col0 (type: int)
-                Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 6 Data size: 26 Basic stats: COMPLETE Column stats: NONE
                 Group By Operator
                   aggregations: count()
                   mode: hash
@@ -410,11 +410,11 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: a
-                  Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: key (type: int)
                     outputColumnNames: _col0
-                    Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE
                     Map Join Operator
                       condition map:
                            Left Outer Join 0 to 1
@@ -424,7 +424,7 @@ STAGE PLANS:
                       outputColumnNames: _col0
                       input vertices:
                         1 Map 3
-                      Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 3 Data size: 13 Basic stats: COMPLETE Column stats: NONE
                       Map Join Operator
                         condition map:
                              Left Outer Join 0 to 1
@@ -433,7 +433,7 @@ STAGE PLANS:
                           1 _col0 (type: int)
                         input vertices:
                           1 Map 4
-                        Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 3 Data size: 14 Basic stats: COMPLETE Column stats: NONE
                         Group By Operator
                           aggregations: count()
                           mode: hash
@@ -449,32 +449,32 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: b
-                  Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: key (type: int)
                     outputColumnNames: _col0
-                    Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
                       key expressions: _col0 (type: int)
                       sort order: +
                       Map-reduce partition columns: _col0 (type: int)
-                      Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: NONE
             Execution mode: llap
             LLAP IO: all inputs
         Map 4 
             Map Operator Tree:
                 TableScan
                   alias: c
-                  Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: key (type: int)
                     outputColumnNames: _col0
-                    Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
                       key expressions: _col0 (type: int)
                       sort order: +
                       Map-reduce partition columns: _col0 (type: int)
-                      Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: NONE
             Execution mode: llap
             LLAP IO: all inputs
         Reducer 2 
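
Editor's note: the statistics changes in this file (Num rows: 1 -> 3 for alias a, 1 -> 2 for b and c, with the join cardinalities rescaled to match) are consistent with the planner now seeing real basic row counts for these small test tables where the old plans showed single-row placeholders; the plan shapes themselves do not change.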

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/llap/tez_union_dynamic_partition.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/tez_union_dynamic_partition.q.out b/ql/src/test/results/clientpositive/llap/tez_union_dynamic_partition.q.out
index 2ca78d7..5dfdede 100644
--- a/ql/src/test/results/clientpositive/llap/tez_union_dynamic_partition.q.out
+++ b/ql/src/test/results/clientpositive/llap/tez_union_dynamic_partition.q.out
@@ -122,7 +122,8 @@ STAGE PLANS:
               name: default.partunion1
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: insert into table partunion1 partition(part1)
 select temps.* from (
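
Editor's note: this hunk and the many like it below are mechanical golden-file updates for a renamed EXPLAIN stage: plans now print "Stats Work" with a "Basic Stats Work:" child where they previously printed "Stats-Aggr Operator"; nothing else in those plans changes.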

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/llap/tez_union_dynamic_partition_2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/tez_union_dynamic_partition_2.q.out b/ql/src/test/results/clientpositive/llap/tez_union_dynamic_partition_2.q.out
index 78a17df..e7599a4 100644
--- a/ql/src/test/results/clientpositive/llap/tez_union_dynamic_partition_2.q.out
+++ b/ql/src/test/results/clientpositive/llap/tez_union_dynamic_partition_2.q.out
@@ -144,7 +144,8 @@ STAGE PLANS:
               name: default.partunion1
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-4
     Tez

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/llap/tez_union_multiinsert.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/tez_union_multiinsert.q.out b/ql/src/test/results/clientpositive/llap/tez_union_multiinsert.q.out
index d399c5e..f9535c5 100644
--- a/ql/src/test/results/clientpositive/llap/tez_union_multiinsert.q.out
+++ b/ql/src/test/results/clientpositive/llap/tez_union_multiinsert.q.out
@@ -237,7 +237,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-4
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-1
     Move Operator
@@ -250,7 +251,8 @@ STAGE PLANS:
               name: default.dest2
 
   Stage: Stage-5
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM (
       select key, value from (
@@ -1144,7 +1146,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-4
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-1
     Move Operator
@@ -1157,7 +1160,8 @@ STAGE PLANS:
               name: default.dest2
 
   Stage: Stage-5
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM (
       select key, value from src s0
@@ -2047,7 +2051,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-4
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-1
     Move Operator
@@ -2060,7 +2065,8 @@ STAGE PLANS:
               name: default.dest2
 
   Stage: Stage-5
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM (
       select key, value from src s0
@@ -2908,7 +2914,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-4
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-1
     Move Operator
@@ -2921,7 +2928,8 @@ STAGE PLANS:
               name: default.dest2
 
   Stage: Stage-5
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM (select 'tst1' as key, cast(count(1) as string) as value from src s1
                          UNION all 
@@ -3755,7 +3763,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-4
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-1
     Move Operator
@@ -3768,7 +3777,8 @@ STAGE PLANS:
               name: default.dest2
 
   Stage: Stage-5
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM (select 'tst1' as key, cast(count(1) as string) as value from src s1
                          UNION distinct 

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/llap/union4.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/union4.q.out b/ql/src/test/results/clientpositive/llap/union4.q.out
index 796bc60..2716072 100644
--- a/ql/src/test/results/clientpositive/llap/union4.q.out
+++ b/ql/src/test/results/clientpositive/llap/union4.q.out
@@ -134,7 +134,8 @@ STAGE PLANS:
               name: default.tmptable
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: insert overwrite table tmptable
 select unionsrc.key, unionsrc.value FROM (select 'tst1' as key, count(1) as value from src s1

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/llap/union6.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/union6.q.out b/ql/src/test/results/clientpositive/llap/union6.q.out
index 4043e3c..b001433 100644
--- a/ql/src/test/results/clientpositive/llap/union6.q.out
+++ b/ql/src/test/results/clientpositive/llap/union6.q.out
@@ -107,7 +107,8 @@ STAGE PLANS:
               name: default.tmptable
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: insert overwrite table tmptable
 select unionsrc.key, unionsrc.value FROM (select 'tst1' as key, cast(count(1) as string) as value from src s1

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/llap/unionDistinct_1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/unionDistinct_1.q.out b/ql/src/test/results/clientpositive/llap/unionDistinct_1.q.out
index 9401479..6b930c8 100644
--- a/ql/src/test/results/clientpositive/llap/unionDistinct_1.q.out
+++ b/ql/src/test/results/clientpositive/llap/unionDistinct_1.q.out
@@ -203,7 +203,8 @@ STAGE PLANS:
               name: default.tmptable
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: insert overwrite table tmptable
   select unionsrc.key, unionsrc.value FROM (select 'tst1' as key, count(1) as value from src s1
@@ -664,7 +665,8 @@ STAGE PLANS:
               name: default.tmptable12
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: insert overwrite table tmptable12
   select unionsrc.key, unionsrc.value FROM (select 'tst1' as key, count(1) as value from src s1
@@ -2868,7 +2870,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-4
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-1
     Move Operator
@@ -2881,7 +2884,8 @@ STAGE PLANS:
               name: default.dest2
 
   Stage: Stage-5
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM (select 'tst1' as key, cast(count(1) as string) as value from src s1
                          UNION DISTINCT  
@@ -3696,7 +3700,8 @@ STAGE PLANS:
               name: default.dest118
 
   Stage: Stage-4
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-1
     Move Operator
@@ -3709,7 +3714,8 @@ STAGE PLANS:
               name: default.dest218
 
   Stage: Stage-5
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM (select 'tst1' as key, cast(count(1) as string) as value from src s1
                          UNION DISTINCT  
@@ -4534,7 +4540,8 @@ STAGE PLANS:
               name: default.dest119
 
   Stage: Stage-4
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-1
     Move Operator
@@ -4547,7 +4554,8 @@ STAGE PLANS:
               name: default.dest219
 
   Stage: Stage-5
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM (select 'tst1' as key, cast(count(1) as string) as value from src s1
                          UNION DISTINCT  
@@ -6813,7 +6821,8 @@ STAGE PLANS:
               name: default.dst_union22
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
 PREHOOK: query: insert overwrite table dst_union22 partition (ds='2')
@@ -9350,7 +9359,8 @@ STAGE PLANS:
           name: default.tmp_unionall
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-0
     Move Operator
@@ -11025,7 +11035,8 @@ STAGE PLANS:
               name: default.union_subq_union
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: insert overwrite table union_subq_union 
 select * from (
@@ -11257,7 +11268,8 @@ STAGE PLANS:
               name: default.union_subq_union29
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: insert overwrite table union_subq_union29 
 select * from (
@@ -11883,7 +11895,8 @@ STAGE PLANS:
               name: default.union_subq_union30
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: insert overwrite table union_subq_union30 
 select * from (
@@ -12172,7 +12185,8 @@ STAGE PLANS:
               name: default.t3
 
   Stage: Stage-4
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-1
     Move Operator
@@ -12185,7 +12199,8 @@ STAGE PLANS:
               name: default.t4
 
   Stage: Stage-5
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: from
 (select * from t1
@@ -12462,7 +12477,8 @@ STAGE PLANS:
               name: default.t5
 
   Stage: Stage-4
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-1
     Move Operator
@@ -12475,7 +12491,8 @@ STAGE PLANS:
               name: default.t6
 
   Stage: Stage-5
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: from
 (
@@ -12776,7 +12793,8 @@ STAGE PLANS:
               name: default.t7
 
   Stage: Stage-4
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-1
     Move Operator
@@ -12789,7 +12807,8 @@ STAGE PLANS:
               name: default.t8
 
   Stage: Stage-5
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: from
 (
@@ -13756,7 +13775,8 @@ STAGE PLANS:
               name: default.test_src
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: INSERT OVERWRITE TABLE test_src 
 SELECT key, value FROM (
@@ -13928,7 +13948,8 @@ STAGE PLANS:
               name: default.test_src
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: INSERT OVERWRITE TABLE test_src 
 SELECT key, value FROM (
@@ -14631,7 +14652,8 @@ STAGE PLANS:
               name: default.tmptable
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: insert overwrite table tmptable
 select unionsrc.key, unionsrc.value FROM (select 'tst1' as key, count(1) as value from src s1
@@ -14949,7 +14971,8 @@ STAGE PLANS:
               name: default.tmptable
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: insert overwrite table tmptable
 select unionsrc.key, unionsrc.value FROM (select 'tst1' as key, cast(count(1) as string) as value from src s1

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/llap/union_stats.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/union_stats.q.out b/ql/src/test/results/clientpositive/llap/union_stats.q.out
index 4178684..a492757 100644
--- a/ql/src/test/results/clientpositive/llap/union_stats.q.out
+++ b/ql/src/test/results/clientpositive/llap/union_stats.q.out
@@ -202,7 +202,8 @@ STAGE PLANS:
           name: default.t
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
   Stage: Stage-0

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/llap/union_top_level.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/union_top_level.q.out b/ql/src/test/results/clientpositive/llap/union_top_level.q.out
index cfbc069..a010232 100644
--- a/ql/src/test/results/clientpositive/llap/union_top_level.q.out
+++ b/ql/src/test/results/clientpositive/llap/union_top_level.q.out
@@ -562,7 +562,8 @@ STAGE PLANS:
           name: default.union_top
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-0
     Move Operator
@@ -796,7 +797,8 @@ STAGE PLANS:
               name: default.union_top
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: insert into table union_top
 select * from (select key, 0 as value from src where key % 3 == 0 limit 3)a
@@ -1016,7 +1018,8 @@ STAGE PLANS:
               name: default.union_top
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: insert overwrite table union_top
 select * from (select key, 0 as value from src where key % 3 == 0 limit 3)a

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/llap/vector_auto_smb_mapjoin_14.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_auto_smb_mapjoin_14.q.out b/ql/src/test/results/clientpositive/llap/vector_auto_smb_mapjoin_14.q.out
index c893e84..54216fa 100644
--- a/ql/src/test/results/clientpositive/llap/vector_auto_smb_mapjoin_14.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_auto_smb_mapjoin_14.q.out
@@ -1932,7 +1932,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-4
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-1
     Move Operator
@@ -1945,7 +1946,8 @@ STAGE PLANS:
               name: default.dest2
 
   Stage: Stage-5
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: from (
   select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key
@@ -2195,7 +2197,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-4
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-1
     Move Operator
@@ -2208,7 +2211,8 @@ STAGE PLANS:
               name: default.dest2
 
   Stage: Stage-5
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: from (
   select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/llap/vector_bucket.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_bucket.q.out b/ql/src/test/results/clientpositive/llap/vector_bucket.q.out
index 0f97d43..146014f 100644
--- a/ql/src/test/results/clientpositive/llap/vector_bucket.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_bucket.q.out
@@ -110,7 +110,8 @@ STAGE PLANS:
               name: default.non_orc_table
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: select a, b from non_orc_table order by a
 PREHOOK: type: QUERY

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/llap/vector_char_4.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_char_4.q.out b/ql/src/test/results/clientpositive/llap/vector_char_4.q.out
index 0e4b276..a459f8b 100644
--- a/ql/src/test/results/clientpositive/llap/vector_char_4.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_char_4.q.out
@@ -195,5 +195,6 @@ STAGE PLANS:
               name: default.char_lazy_binary_columnar
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/llap/vector_complex_all.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_complex_all.q.out b/ql/src/test/results/clientpositive/llap/vector_complex_all.q.out
index 5065c72..faae865 100644
--- a/ql/src/test/results/clientpositive/llap/vector_complex_all.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_complex_all.q.out
@@ -854,7 +854,8 @@ STAGE PLANS:
               name: default.orc_create_complex
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 Warning: Shuffle Join MERGEJOIN[15][tables = [$hdt$_1, $hdt$_2, $hdt$_3, $hdt$_0]] in Stage 'Reducer 2' is a cross product
 PREHOOK: query: INSERT INTO TABLE orc_create_complex

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/llap/vector_decimal_6.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_decimal_6.q.out b/ql/src/test/results/clientpositive/llap/vector_decimal_6.q.out
index 1f30170..1a2f453 100644
--- a/ql/src/test/results/clientpositive/llap/vector_decimal_6.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_decimal_6.q.out
@@ -742,7 +742,8 @@ STAGE PLANS:
           name: default.DECIMAL_6_3
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-0
     Move Operator

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/llap/vector_groupby4.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_groupby4.q.out b/ql/src/test/results/clientpositive/llap/vector_groupby4.q.out
index 5cc19d0..ce89356 100644
--- a/ql/src/test/results/clientpositive/llap/vector_groupby4.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_groupby4.q.out
@@ -158,7 +158,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM srcorc
 INSERT OVERWRITE TABLE dest1 SELECT substr(srcorc.key,1,1) GROUP BY substr(srcorc.key,1,1)

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/llap/vector_groupby6.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_groupby6.q.out b/ql/src/test/results/clientpositive/llap/vector_groupby6.q.out
index dc02b71..cb98fc4 100644
--- a/ql/src/test/results/clientpositive/llap/vector_groupby6.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_groupby6.q.out
@@ -158,7 +158,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM srcorc
 INSERT OVERWRITE TABLE dest1 SELECT DISTINCT substr(srcorc.value,5,1)

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/llap/vector_groupby_cube1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_groupby_cube1.q.out b/ql/src/test/results/clientpositive/llap/vector_groupby_cube1.q.out
index 9df29c3..a17f457 100644
--- a/ql/src/test/results/clientpositive/llap/vector_groupby_cube1.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_groupby_cube1.q.out
@@ -764,7 +764,8 @@ STAGE PLANS:
               name: default.t2
 
   Stage: Stage-4
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-1
     Move Operator
@@ -777,7 +778,8 @@ STAGE PLANS:
               name: default.t3
 
   Stage: Stage-5
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM T1
 INSERT OVERWRITE TABLE T2 SELECT key, val, count(1) group by key, val with cube

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/llap/vector_groupby_rollup1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_groupby_rollup1.q.out b/ql/src/test/results/clientpositive/llap/vector_groupby_rollup1.q.out
index 7852b0e..d1263cd 100644
--- a/ql/src/test/results/clientpositive/llap/vector_groupby_rollup1.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_groupby_rollup1.q.out
@@ -1003,7 +1003,8 @@ STAGE PLANS:
               name: default.t2
 
   Stage: Stage-4
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-1
     Move Operator
@@ -1016,7 +1017,8 @@ STAGE PLANS:
               name: default.t3
 
   Stage: Stage-5
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM T1
 INSERT OVERWRITE TABLE T2 SELECT key, val, count(1) group by key, val with rollup

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/llap/vector_multi_insert.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_multi_insert.q.out b/ql/src/test/results/clientpositive/llap/vector_multi_insert.q.out
index b2903a6..4bf6a03 100644
--- a/ql/src/test/results/clientpositive/llap/vector_multi_insert.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_multi_insert.q.out
@@ -170,7 +170,8 @@ STAGE PLANS:
               name: default.orc_rn1
 
   Stage: Stage-5
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-1
     Move Operator
@@ -183,7 +184,8 @@ STAGE PLANS:
               name: default.orc_rn2
 
   Stage: Stage-6
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-2
     Move Operator
@@ -196,7 +198,8 @@ STAGE PLANS:
               name: default.orc_rn3
 
   Stage: Stage-7
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: from orc1 a
 insert overwrite table orc_rn1 select a.* where a.rn < 100

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/llap/vector_udf_character_length.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_udf_character_length.q.out b/ql/src/test/results/clientpositive/llap/vector_udf_character_length.q.out
index e596cf2..281a03d 100644
--- a/ql/src/test/results/clientpositive/llap/vector_udf_character_length.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_udf_character_length.q.out
@@ -87,7 +87,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM src1 INSERT OVERWRITE TABLE dest1 SELECT character_length(src1.value)
 PREHOOK: type: QUERY

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/llap/vector_udf_octet_length.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_udf_octet_length.q.out b/ql/src/test/results/clientpositive/llap/vector_udf_octet_length.q.out
index b156dc6..5b98553 100644
--- a/ql/src/test/results/clientpositive/llap/vector_udf_octet_length.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_udf_octet_length.q.out
@@ -70,7 +70,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM src1 INSERT OVERWRITE TABLE dest1 SELECT octet_length(src1.value)
 PREHOOK: type: QUERY

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/llap/vector_varchar_4.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_varchar_4.q.out b/ql/src/test/results/clientpositive/llap/vector_varchar_4.q.out
index aa8528f..d883c6d 100644
--- a/ql/src/test/results/clientpositive/llap/vector_varchar_4.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_varchar_4.q.out
@@ -195,5 +195,6 @@ STAGE PLANS:
               name: default.varchar_lazy_binary_columnar
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/llap/vector_varchar_simple.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_varchar_simple.q.out b/ql/src/test/results/clientpositive/llap/vector_varchar_simple.q.out
index 69a3661..6d2d728 100644
--- a/ql/src/test/results/clientpositive/llap/vector_varchar_simple.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_varchar_simple.q.out
@@ -409,7 +409,8 @@ STAGE PLANS:
               name: default.varchar_3
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: insert into table varchar_3 select cint from alltypesorc limit 10
 PREHOOK: type: QUERY

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/llap/vector_windowing.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_windowing.q.out b/ql/src/test/results/clientpositive/llap/vector_windowing.q.out
index 1ff3e8e..776ae3e 100644
--- a/ql/src/test/results/clientpositive/llap/vector_windowing.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_windowing.q.out
@@ -5717,7 +5717,8 @@ STAGE PLANS:
               name: default.part_1
 
   Stage: Stage-5
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-1
     Move Operator
@@ -5730,7 +5731,8 @@ STAGE PLANS:
               name: default.part_2
 
   Stage: Stage-6
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-2
     Move Operator
@@ -5743,7 +5745,8 @@ STAGE PLANS:
               name: default.part_3
 
   Stage: Stage-7
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: from part 
 INSERT OVERWRITE TABLE part_1 

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/llap/vector_windowing_expressions.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_windowing_expressions.q.out b/ql/src/test/results/clientpositive/llap/vector_windowing_expressions.q.out
index 7721002..7e9a564 100644
--- a/ql/src/test/results/clientpositive/llap/vector_windowing_expressions.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_windowing_expressions.q.out
@@ -1830,7 +1830,8 @@ STAGE PLANS:
               name: default.t1
 
   Stage: Stage-4
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-1
     Move Operator
@@ -1843,7 +1844,8 @@ STAGE PLANS:
               name: default.t2
 
   Stage: Stage-5
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: from (select sum(i) over (partition by ts order by i), s from over10k) tt insert overwrite table t1 select * insert overwrite table t2 select *
 PREHOOK: type: QUERY

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/llap/vector_windowing_streaming.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_windowing_streaming.q.out b/ql/src/test/results/clientpositive/llap/vector_windowing_streaming.q.out
index 17921fb..61730f5 100644
--- a/ql/src/test/results/clientpositive/llap/vector_windowing_streaming.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_windowing_streaming.q.out
@@ -964,7 +964,8 @@ STAGE PLANS:
           name: default.sD
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-0
     Move Operator

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/llap/vectorized_dynamic_partition_pruning.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vectorized_dynamic_partition_pruning.q.out b/ql/src/test/results/clientpositive/llap/vectorized_dynamic_partition_pruning.q.out
index 7f3eb0d..5c0a90e 100644
--- a/ql/src/test/results/clientpositive/llap/vectorized_dynamic_partition_pruning.q.out
+++ b/ql/src/test/results/clientpositive/llap/vectorized_dynamic_partition_pruning.q.out
@@ -128,7 +128,8 @@ STAGE PLANS:
           name: default.srcpart_date
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-0
     Move Operator

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/llap/vectorized_ptf.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vectorized_ptf.q.out b/ql/src/test/results/clientpositive/llap/vectorized_ptf.q.out
index 7d3f2d1..15a301a 100644
--- a/ql/src/test/results/clientpositive/llap/vectorized_ptf.q.out
+++ b/ql/src/test/results/clientpositive/llap/vectorized_ptf.q.out
@@ -4057,7 +4057,8 @@ STAGE PLANS:
               name: default.part_4
 
   Stage: Stage-4
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-1
     Move Operator
@@ -4070,7 +4071,8 @@ STAGE PLANS:
               name: default.part_5
 
   Stage: Stage-5
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: from noop(on part_orc 
 partition by p_mfgr 

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/load_dyn_part1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/load_dyn_part1.q.out b/ql/src/test/results/clientpositive/load_dyn_part1.q.out
index 43a8bc8..b43bd31 100644
--- a/ql/src/test/results/clientpositive/load_dyn_part1.q.out
+++ b/ql/src/test/results/clientpositive/load_dyn_part1.q.out
@@ -129,7 +129,8 @@ STAGE PLANS:
               name: default.nzhang_part1
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-4
     Map Reduce
@@ -184,7 +185,8 @@ STAGE PLANS:
               name: default.nzhang_part2
 
   Stage: Stage-9
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-10
     Map Reduce

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/load_dyn_part10.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/load_dyn_part10.q.out b/ql/src/test/results/clientpositive/load_dyn_part10.q.out
index 066b335..aea7798 100644
--- a/ql/src/test/results/clientpositive/load_dyn_part10.q.out
+++ b/ql/src/test/results/clientpositive/load_dyn_part10.q.out
@@ -80,7 +80,8 @@ STAGE PLANS:
               name: default.nzhang_part10
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: from srcpart
 insert overwrite table nzhang_part10 partition(ds='2008-12-31', hr) select key, value, hr where ds > '2008-04-08'

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/load_dyn_part13.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/load_dyn_part13.q.out b/ql/src/test/results/clientpositive/load_dyn_part13.q.out
index f8105c7..8c9df3e 100644
--- a/ql/src/test/results/clientpositive/load_dyn_part13.q.out
+++ b/ql/src/test/results/clientpositive/load_dyn_part13.q.out
@@ -119,7 +119,8 @@ STAGE PLANS:
               name: default.nzhang_part13
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: insert overwrite table nzhang_part13 partition (ds="2010-03-03", hr) 
 select * from (

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/load_dyn_part14.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/load_dyn_part14.q.out b/ql/src/test/results/clientpositive/load_dyn_part14.q.out
index d65a1e3..8e951b6 100644
--- a/ql/src/test/results/clientpositive/load_dyn_part14.q.out
+++ b/ql/src/test/results/clientpositive/load_dyn_part14.q.out
@@ -145,7 +145,8 @@ STAGE PLANS:
               name: default.nzhang_part14
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-4
     Map Reduce

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/load_dyn_part2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/load_dyn_part2.q.out b/ql/src/test/results/clientpositive/load_dyn_part2.q.out
index e8efa5f..9022121 100644
--- a/ql/src/test/results/clientpositive/load_dyn_part2.q.out
+++ b/ql/src/test/results/clientpositive/load_dyn_part2.q.out
@@ -82,7 +82,8 @@ STAGE PLANS:
               name: default.nzhang_part_bucket
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: insert overwrite table nzhang_part_bucket partition (ds='2010-03-23', hr) select key, value, hr from srcpart where ds is not null and hr is not null
 PREHOOK: type: QUERY

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/load_dyn_part3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/load_dyn_part3.q.out b/ql/src/test/results/clientpositive/load_dyn_part3.q.out
index efe5222..c63b451 100644
--- a/ql/src/test/results/clientpositive/load_dyn_part3.q.out
+++ b/ql/src/test/results/clientpositive/load_dyn_part3.q.out
@@ -78,7 +78,8 @@ STAGE PLANS:
               name: default.nzhang_part3
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: insert overwrite table nzhang_part3 partition (ds, hr) select key, value, ds, hr from srcpart where ds is not null and hr is not null
 PREHOOK: type: QUERY

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/load_dyn_part4.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/load_dyn_part4.q.out b/ql/src/test/results/clientpositive/load_dyn_part4.q.out
index a561cc6..cf57192 100644
--- a/ql/src/test/results/clientpositive/load_dyn_part4.q.out
+++ b/ql/src/test/results/clientpositive/load_dyn_part4.q.out
@@ -88,7 +88,8 @@ STAGE PLANS:
               name: default.nzhang_part4
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: insert overwrite table nzhang_part4 partition (ds, hr) select key, value, ds, hr from srcpart where ds is not null and hr is not null
 PREHOOK: type: QUERY

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/load_dyn_part8.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/load_dyn_part8.q.out b/ql/src/test/results/clientpositive/load_dyn_part8.q.out
index cf2f60f..9e20fdc 100644
--- a/ql/src/test/results/clientpositive/load_dyn_part8.q.out
+++ b/ql/src/test/results/clientpositive/load_dyn_part8.q.out
@@ -363,7 +363,8 @@ STAGE PLANS:
               name: default.nzhang_part8
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
   Stage: Stage-1
@@ -395,7 +396,8 @@ STAGE PLANS:
               name: default.nzhang_part8
 
   Stage: Stage-4
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
 PREHOOK: query: from srcpart

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/load_dyn_part9.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/load_dyn_part9.q.out b/ql/src/test/results/clientpositive/load_dyn_part9.q.out
index 25937df..77e689c 100644
--- a/ql/src/test/results/clientpositive/load_dyn_part9.q.out
+++ b/ql/src/test/results/clientpositive/load_dyn_part9.q.out
@@ -80,7 +80,8 @@ STAGE PLANS:
               name: default.nzhang_part9
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: from srcpart
 insert overwrite table nzhang_part9 partition (ds, hr) select key, value, ds, hr where ds <= '2008-04-08'

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/mapreduce1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/mapreduce1.q.out b/ql/src/test/results/clientpositive/mapreduce1.q.out
index 3d0a156..602ae77 100644
--- a/ql/src/test/results/clientpositive/mapreduce1.q.out
+++ b/ql/src/test/results/clientpositive/mapreduce1.q.out
@@ -76,7 +76,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM src
 INSERT OVERWRITE TABLE dest1

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/mapreduce2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/mapreduce2.q.out b/ql/src/test/results/clientpositive/mapreduce2.q.out
index 676c387..a401ac0 100644
--- a/ql/src/test/results/clientpositive/mapreduce2.q.out
+++ b/ql/src/test/results/clientpositive/mapreduce2.q.out
@@ -73,7 +73,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM src
 INSERT OVERWRITE TABLE dest1

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/mapreduce3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/mapreduce3.q.out b/ql/src/test/results/clientpositive/mapreduce3.q.out
index fc1a402..abc3f09 100644
--- a/ql/src/test/results/clientpositive/mapreduce3.q.out
+++ b/ql/src/test/results/clientpositive/mapreduce3.q.out
@@ -73,7 +73,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM src
 INSERT OVERWRITE TABLE dest1

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/mapreduce4.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/mapreduce4.q.out b/ql/src/test/results/clientpositive/mapreduce4.q.out
index 17fa029..91275a7 100644
--- a/ql/src/test/results/clientpositive/mapreduce4.q.out
+++ b/ql/src/test/results/clientpositive/mapreduce4.q.out
@@ -76,7 +76,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM src
 INSERT OVERWRITE TABLE dest1

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/mapreduce5.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/mapreduce5.q.out b/ql/src/test/results/clientpositive/mapreduce5.q.out
index 21103f8..bb1e9e4 100644
--- a/ql/src/test/results/clientpositive/mapreduce5.q.out
+++ b/ql/src/test/results/clientpositive/mapreduce5.q.out
@@ -67,7 +67,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM src
 INSERT OVERWRITE TABLE dest1

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/mapreduce6.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/mapreduce6.q.out b/ql/src/test/results/clientpositive/mapreduce6.q.out
index fe4e631..1a31ebc 100644
--- a/ql/src/test/results/clientpositive/mapreduce6.q.out
+++ b/ql/src/test/results/clientpositive/mapreduce6.q.out
@@ -67,7 +67,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM src
 INSERT OVERWRITE TABLE dest1

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/mapreduce7.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/mapreduce7.q.out b/ql/src/test/results/clientpositive/mapreduce7.q.out
index cc97887..b0c24fa 100644
--- a/ql/src/test/results/clientpositive/mapreduce7.q.out
+++ b/ql/src/test/results/clientpositive/mapreduce7.q.out
@@ -73,7 +73,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM src
 INSERT OVERWRITE TABLE dest1

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/mapreduce8.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/mapreduce8.q.out b/ql/src/test/results/clientpositive/mapreduce8.q.out
index b1763c7..fc866d5 100644
--- a/ql/src/test/results/clientpositive/mapreduce8.q.out
+++ b/ql/src/test/results/clientpositive/mapreduce8.q.out
@@ -76,7 +76,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM src
 INSERT OVERWRITE TABLE dest1

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/masking_11.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/masking_11.q.out b/ql/src/test/results/clientpositive/masking_11.q.out
index f29c51f..0941b8c 100644
--- a/ql/src/test/results/clientpositive/masking_11.q.out
+++ b/ql/src/test/results/clientpositive/masking_11.q.out
@@ -13,8 +13,10 @@ POSTHOOK: Lineage: masking_test.value SIMPLE [(src)src.FieldSchema(name:value, t
 PREHOOK: query: analyze table `masking_test` compute statistics for columns
 PREHOOK: type: QUERY
 PREHOOK: Input: default@masking_test
+PREHOOK: Output: default@masking_test
 #### A masked pattern was here ####
 POSTHOOK: query: analyze table `masking_test` compute statistics for columns
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@masking_test
+POSTHOOK: Output: default@masking_test
 #### A masked pattern was here ####

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/materialized_view_rewrite_ssb.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/materialized_view_rewrite_ssb.q.out b/ql/src/test/results/clientpositive/materialized_view_rewrite_ssb.q.out
index 8b67b55..de49198 100644
--- a/ql/src/test/results/clientpositive/materialized_view_rewrite_ssb.q.out
+++ b/ql/src/test/results/clientpositive/materialized_view_rewrite_ssb.q.out
@@ -187,42 +187,52 @@ POSTHOOK: Output: default@lineorder
 PREHOOK: query: analyze table customer compute statistics for columns
 PREHOOK: type: QUERY
 PREHOOK: Input: default@customer
+PREHOOK: Output: default@customer
 #### A masked pattern was here ####
 POSTHOOK: query: analyze table customer compute statistics for columns
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@customer
+POSTHOOK: Output: default@customer
 #### A masked pattern was here ####
 PREHOOK: query: analyze table dates compute statistics for columns
 PREHOOK: type: QUERY
 PREHOOK: Input: default@dates
+PREHOOK: Output: default@dates
 #### A masked pattern was here ####
 POSTHOOK: query: analyze table dates compute statistics for columns
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@dates
+POSTHOOK: Output: default@dates
 #### A masked pattern was here ####
 PREHOOK: query: analyze table ssb_part compute statistics for columns
 PREHOOK: type: QUERY
 PREHOOK: Input: default@ssb_part
+PREHOOK: Output: default@ssb_part
 #### A masked pattern was here ####
 POSTHOOK: query: analyze table ssb_part compute statistics for columns
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@ssb_part
+POSTHOOK: Output: default@ssb_part
 #### A masked pattern was here ####
 PREHOOK: query: analyze table supplier compute statistics for columns
 PREHOOK: type: QUERY
 PREHOOK: Input: default@supplier
+PREHOOK: Output: default@supplier
 #### A masked pattern was here ####
 POSTHOOK: query: analyze table supplier compute statistics for columns
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@supplier
+POSTHOOK: Output: default@supplier
 #### A masked pattern was here ####
 PREHOOK: query: analyze table lineorder compute statistics for columns
 PREHOOK: type: QUERY
 PREHOOK: Input: default@lineorder
+PREHOOK: Output: default@lineorder
 #### A masked pattern was here ####
 POSTHOOK: query: analyze table lineorder compute statistics for columns
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@lineorder
+POSTHOOK: Output: default@lineorder
 #### A masked pattern was here ####
 PREHOOK: query: CREATE MATERIALIZED VIEW `ssb_mv` ENABLE REWRITE
 AS

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/materialized_view_rewrite_ssb_2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/materialized_view_rewrite_ssb_2.q.out b/ql/src/test/results/clientpositive/materialized_view_rewrite_ssb_2.q.out
index 5e4c96d..a11d668 100644
--- a/ql/src/test/results/clientpositive/materialized_view_rewrite_ssb_2.q.out
+++ b/ql/src/test/results/clientpositive/materialized_view_rewrite_ssb_2.q.out
@@ -187,42 +187,52 @@ POSTHOOK: Output: default@lineorder
 PREHOOK: query: analyze table customer compute statistics for columns
 PREHOOK: type: QUERY
 PREHOOK: Input: default@customer
+PREHOOK: Output: default@customer
 #### A masked pattern was here ####
 POSTHOOK: query: analyze table customer compute statistics for columns
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@customer
+POSTHOOK: Output: default@customer
 #### A masked pattern was here ####
 PREHOOK: query: analyze table dates compute statistics for columns
 PREHOOK: type: QUERY
 PREHOOK: Input: default@dates
+PREHOOK: Output: default@dates
 #### A masked pattern was here ####
 POSTHOOK: query: analyze table dates compute statistics for columns
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@dates
+POSTHOOK: Output: default@dates
 #### A masked pattern was here ####
 PREHOOK: query: analyze table ssb_part compute statistics for columns
 PREHOOK: type: QUERY
 PREHOOK: Input: default@ssb_part
+PREHOOK: Output: default@ssb_part
 #### A masked pattern was here ####
 POSTHOOK: query: analyze table ssb_part compute statistics for columns
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@ssb_part
+POSTHOOK: Output: default@ssb_part
 #### A masked pattern was here ####
 PREHOOK: query: analyze table supplier compute statistics for columns
 PREHOOK: type: QUERY
 PREHOOK: Input: default@supplier
+PREHOOK: Output: default@supplier
 #### A masked pattern was here ####
 POSTHOOK: query: analyze table supplier compute statistics for columns
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@supplier
+POSTHOOK: Output: default@supplier
 #### A masked pattern was here ####
 PREHOOK: query: analyze table lineorder compute statistics for columns
 PREHOOK: type: QUERY
 PREHOOK: Input: default@lineorder
+PREHOOK: Output: default@lineorder
 #### A masked pattern was here ####
 POSTHOOK: query: analyze table lineorder compute statistics for columns
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@lineorder
+POSTHOOK: Output: default@lineorder
 #### A masked pattern was here ####
 PREHOOK: query: CREATE MATERIALIZED VIEW `ssb_mv` ENABLE REWRITE
 AS

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/merge1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/merge1.q.out b/ql/src/test/results/clientpositive/merge1.q.out
index 7423d83..bd8cd7b 100644
--- a/ql/src/test/results/clientpositive/merge1.q.out
+++ b/ql/src/test/results/clientpositive/merge1.q.out
@@ -87,7 +87,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-3
     Map Reduce
@@ -548,7 +549,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-3
     Map Reduce
@@ -649,7 +651,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-3
     Map Reduce

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/merge2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/merge2.q.out b/ql/src/test/results/clientpositive/merge2.q.out
index bbc55d8..0d781c2 100644
--- a/ql/src/test/results/clientpositive/merge2.q.out
+++ b/ql/src/test/results/clientpositive/merge2.q.out
@@ -87,7 +87,8 @@ STAGE PLANS:
               name: default.test1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-3
     Map Reduce
@@ -548,7 +549,8 @@ STAGE PLANS:
               name: default.test1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-3
     Map Reduce
@@ -649,7 +651,8 @@ STAGE PLANS:
               name: default.test1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-3
     Map Reduce

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/merge3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/merge3.q.out b/ql/src/test/results/clientpositive/merge3.q.out
index e05d651..22a2820 100644
--- a/ql/src/test/results/clientpositive/merge3.q.out
+++ b/ql/src/test/results/clientpositive/merge3.q.out
@@ -178,7 +178,8 @@ STAGE PLANS:
           name: default.merge_src2
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
   Stage: Stage-3
@@ -2558,7 +2559,8 @@ STAGE PLANS:
               name: default.merge_src_part2
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
   Stage: Stage-3
@@ -4993,7 +4995,8 @@ STAGE PLANS:
               name: default.merge_src_part2
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
   Stage: Stage-3

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/merge4.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/merge4.q.out b/ql/src/test/results/clientpositive/merge4.q.out
index 182c6a8..de4c593 100644
--- a/ql/src/test/results/clientpositive/merge4.q.out
+++ b/ql/src/test/results/clientpositive/merge4.q.out
@@ -65,7 +65,8 @@ STAGE PLANS:
               name: default.nzhang_part
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-3
     Map Reduce
@@ -1185,7 +1186,8 @@ STAGE PLANS:
               name: default.nzhang_part
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-3
     Map Reduce
@@ -2845,7 +2847,8 @@ STAGE PLANS:
               name: default.nzhang_part
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-3
     Map Reduce

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/merge_dynamic_partition.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/merge_dynamic_partition.q.out b/ql/src/test/results/clientpositive/merge_dynamic_partition.q.out
index a777fe0..990e6df 100644
--- a/ql/src/test/results/clientpositive/merge_dynamic_partition.q.out
+++ b/ql/src/test/results/clientpositive/merge_dynamic_partition.q.out
@@ -92,7 +92,8 @@ STAGE PLANS:
               name: default.merge_dynamic_part
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: insert overwrite table merge_dynamic_part partition (ds='2008-04-08', hr) select key, value, hr from srcpart_merge_dp where ds='2008-04-08'
 PREHOOK: type: QUERY
@@ -692,7 +693,8 @@ STAGE PLANS:
               name: default.merge_dynamic_part
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-3
     Map Reduce
@@ -1322,7 +1324,8 @@ STAGE PLANS:
               name: default.merge_dynamic_part
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-3
     Map Reduce

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/merge_dynamic_partition2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/merge_dynamic_partition2.q.out b/ql/src/test/results/clientpositive/merge_dynamic_partition2.q.out
index 5a2afb0..ccffc89 100644
--- a/ql/src/test/results/clientpositive/merge_dynamic_partition2.q.out
+++ b/ql/src/test/results/clientpositive/merge_dynamic_partition2.q.out
@@ -123,7 +123,8 @@ STAGE PLANS:
               name: default.merge_dynamic_part
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-3
     Map Reduce

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/merge_dynamic_partition3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/merge_dynamic_partition3.q.out b/ql/src/test/results/clientpositive/merge_dynamic_partition3.q.out
index 055e07a..68d7bbe 100644
--- a/ql/src/test/results/clientpositive/merge_dynamic_partition3.q.out
+++ b/ql/src/test/results/clientpositive/merge_dynamic_partition3.q.out
@@ -183,7 +183,8 @@ STAGE PLANS:
               name: default.merge_dynamic_part
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-3
     Map Reduce

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/merge_dynamic_partition4.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/merge_dynamic_partition4.q.out b/ql/src/test/results/clientpositive/merge_dynamic_partition4.q.out
index cbeaf42..0da56cd 100644
--- a/ql/src/test/results/clientpositive/merge_dynamic_partition4.q.out
+++ b/ql/src/test/results/clientpositive/merge_dynamic_partition4.q.out
@@ -184,7 +184,8 @@ STAGE PLANS:
               name: default.merge_dynamic_part
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-3
     Merge File Operator

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/merge_dynamic_partition5.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/merge_dynamic_partition5.q.out b/ql/src/test/results/clientpositive/merge_dynamic_partition5.q.out
index 5a562f4..45237a1 100644
--- a/ql/src/test/results/clientpositive/merge_dynamic_partition5.q.out
+++ b/ql/src/test/results/clientpositive/merge_dynamic_partition5.q.out
@@ -160,7 +160,8 @@ STAGE PLANS:
               name: default.merge_dynamic_part
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-3
     Merge File Operator

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/metadata_only_queries.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/metadata_only_queries.q.out b/ql/src/test/results/clientpositive/metadata_only_queries.q.out
index 72a90fb..0fe6684 100644
--- a/ql/src/test/results/clientpositive/metadata_only_queries.q.out
+++ b/ql/src/test/results/clientpositive/metadata_only_queries.q.out
@@ -395,40 +395,54 @@ STAGE PLANS:
 PREHOOK: query: analyze table stats_tbl compute statistics for columns t,si,i,b,f,d,bo,s,bin
 PREHOOK: type: QUERY
 PREHOOK: Input: default@stats_tbl
+PREHOOK: Output: default@stats_tbl
 #### A masked pattern was here ####
 POSTHOOK: query: analyze table stats_tbl compute statistics for columns t,si,i,b,f,d,bo,s,bin
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@stats_tbl
+POSTHOOK: Output: default@stats_tbl
 #### A masked pattern was here ####
 PREHOOK: query: analyze table stats_tbl_part partition(dt='2010') compute statistics for columns t,si,i,b,f,d,bo,s,bin
 PREHOOK: type: QUERY
 PREHOOK: Input: default@stats_tbl_part
 PREHOOK: Input: default@stats_tbl_part@dt=2010
+PREHOOK: Output: default@stats_tbl_part
+PREHOOK: Output: default@stats_tbl_part@dt=2010
 #### A masked pattern was here ####
 POSTHOOK: query: analyze table stats_tbl_part partition(dt='2010') compute statistics for columns t,si,i,b,f,d,bo,s,bin
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@stats_tbl_part
 POSTHOOK: Input: default@stats_tbl_part@dt=2010
+POSTHOOK: Output: default@stats_tbl_part
+POSTHOOK: Output: default@stats_tbl_part@dt=2010
 #### A masked pattern was here ####
 PREHOOK: query: analyze table stats_tbl_part partition(dt='2011') compute statistics for columns t,si,i,b,f,d,bo,s,bin
 PREHOOK: type: QUERY
 PREHOOK: Input: default@stats_tbl_part
 PREHOOK: Input: default@stats_tbl_part@dt=2011
+PREHOOK: Output: default@stats_tbl_part
+PREHOOK: Output: default@stats_tbl_part@dt=2011
 #### A masked pattern was here ####
 POSTHOOK: query: analyze table stats_tbl_part partition(dt='2011') compute statistics for columns t,si,i,b,f,d,bo,s,bin
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@stats_tbl_part
 POSTHOOK: Input: default@stats_tbl_part@dt=2011
+POSTHOOK: Output: default@stats_tbl_part
+POSTHOOK: Output: default@stats_tbl_part@dt=2011
 #### A masked pattern was here ####
 PREHOOK: query: analyze table stats_tbl_part partition(dt='2012') compute statistics for columns t,si,i,b,f,d,bo,s,bin
 PREHOOK: type: QUERY
 PREHOOK: Input: default@stats_tbl_part
 PREHOOK: Input: default@stats_tbl_part@dt=2012
+PREHOOK: Output: default@stats_tbl_part
+PREHOOK: Output: default@stats_tbl_part@dt=2012
 #### A masked pattern was here ####
 POSTHOOK: query: analyze table stats_tbl_part partition(dt='2012') compute statistics for columns t,si,i,b,f,d,bo,s,bin
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@stats_tbl_part
 POSTHOOK: Input: default@stats_tbl_part@dt=2012
+POSTHOOK: Output: default@stats_tbl_part
+POSTHOOK: Output: default@stats_tbl_part@dt=2012
 #### A masked pattern was here ####
 PREHOOK: query: explain 
 select count(*), sum(1), sum(0.2), count(1), count(s), count(bo), count(bin), count(si) from stats_tbl

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/metadata_only_queries_with_filters.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/metadata_only_queries_with_filters.q.out b/ql/src/test/results/clientpositive/metadata_only_queries_with_filters.q.out
index 6376aa7..79d9d27 100644
--- a/ql/src/test/results/clientpositive/metadata_only_queries_with_filters.q.out
+++ b/ql/src/test/results/clientpositive/metadata_only_queries_with_filters.q.out
@@ -126,21 +126,29 @@ PREHOOK: query: analyze table stats_tbl_part partition(dt=2010) compute statisti
 PREHOOK: type: QUERY
 PREHOOK: Input: default@stats_tbl_part
 PREHOOK: Input: default@stats_tbl_part@dt=2010
+PREHOOK: Output: default@stats_tbl_part
+PREHOOK: Output: default@stats_tbl_part@dt=2010
 #### A masked pattern was here ####
 POSTHOOK: query: analyze table stats_tbl_part partition(dt=2010) compute statistics for columns t,si,i,b,f,d,bo,s,bin
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@stats_tbl_part
 POSTHOOK: Input: default@stats_tbl_part@dt=2010
+POSTHOOK: Output: default@stats_tbl_part
+POSTHOOK: Output: default@stats_tbl_part@dt=2010
 #### A masked pattern was here ####
 PREHOOK: query: analyze table stats_tbl_part partition(dt=2014) compute statistics for columns t,si,i,b,f,d,bo,s,bin
 PREHOOK: type: QUERY
 PREHOOK: Input: default@stats_tbl_part
 PREHOOK: Input: default@stats_tbl_part@dt=2014
+PREHOOK: Output: default@stats_tbl_part
+PREHOOK: Output: default@stats_tbl_part@dt=2014
 #### A masked pattern was here ####
 POSTHOOK: query: analyze table stats_tbl_part partition(dt=2014) compute statistics for columns t,si,i,b,f,d,bo,s,bin
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@stats_tbl_part
 POSTHOOK: Input: default@stats_tbl_part@dt=2014
+POSTHOOK: Output: default@stats_tbl_part
+POSTHOOK: Output: default@stats_tbl_part@dt=2014
 #### A masked pattern was here ####
 PREHOOK: query: explain 
 select count(*), count(1), sum(1), count(s), count(bo), count(bin), count(si), max(i), min(b), max(f), min(d) from stats_tbl_part where dt = 2010

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/mm_all.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/mm_all.q.out b/ql/src/test/results/clientpositive/mm_all.q.out
index 56a98e7..5ad5957 100644
--- a/ql/src/test/results/clientpositive/mm_all.q.out
+++ b/ql/src/test/results/clientpositive/mm_all.q.out
@@ -105,7 +105,8 @@ STAGE PLANS:
           micromanaged table: true
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-3
     Merge File Operator
@@ -1593,10 +1594,10 @@ POSTHOOK: Output: default@multi1_mm@p=1
 POSTHOOK: Output: default@multi1_mm@p=455
 POSTHOOK: Output: default@multi1_mm@p=456
 POSTHOOK: Output: default@multi1_mm@p=457
-POSTHOOK: Lineage: multi1_mm PARTITION(p=1).key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: multi1_mm PARTITION(p=1).key2 SIMPLE [(intermediate)intermediate.FieldSchema(name:p, type:int, comment:null), ]
 POSTHOOK: Lineage: multi1_mm PARTITION(p=1).key SIMPLE [(intermediate)intermediate.FieldSchema(name:p, type:int, comment:null), ]
 POSTHOOK: Lineage: multi1_mm PARTITION(p=1).key2 SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: multi1_mm PARTITION(p=1).key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: multi1_mm PARTITION(p=1).key2 SIMPLE [(intermediate)intermediate.FieldSchema(name:p, type:int, comment:null), ]
 POSTHOOK: Lineage: multi1_mm PARTITION(p=455).key SIMPLE [(intermediate)intermediate.FieldSchema(name:p, type:int, comment:null), ]
 POSTHOOK: Lineage: multi1_mm PARTITION(p=455).key2 SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 POSTHOOK: Lineage: multi1_mm PARTITION(p=456).key SIMPLE [(intermediate)intermediate.FieldSchema(name:p, type:int, comment:null), ]

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/multi_insert_gby.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/multi_insert_gby.q.out b/ql/src/test/results/clientpositive/multi_insert_gby.q.out
index 0ed05dd..85fd8b7 100644
--- a/ql/src/test/results/clientpositive/multi_insert_gby.q.out
+++ b/ql/src/test/results/clientpositive/multi_insert_gby.q.out
@@ -105,7 +105,8 @@ STAGE PLANS:
               name: default.e1
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-1
     Move Operator
@@ -118,7 +119,8 @@ STAGE PLANS:
               name: default.e2
 
   Stage: Stage-4
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM src
 INSERT OVERWRITE TABLE e1
@@ -284,7 +286,8 @@ STAGE PLANS:
               name: default.e2
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-0
     Move Operator
@@ -297,7 +300,8 @@ STAGE PLANS:
               name: default.e1
 
   Stage: Stage-4
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM src
 INSERT OVERWRITE TABLE e1

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/multi_insert_gby2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/multi_insert_gby2.q.out b/ql/src/test/results/clientpositive/multi_insert_gby2.q.out
index d1da4e7..ceb6940 100644
--- a/ql/src/test/results/clientpositive/multi_insert_gby2.q.out
+++ b/ql/src/test/results/clientpositive/multi_insert_gby2.q.out
@@ -95,7 +95,8 @@ STAGE PLANS:
               name: default.e1
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-1
     Move Operator
@@ -108,7 +109,8 @@ STAGE PLANS:
               name: default.e2
 
   Stage: Stage-4
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM (select key, cast(key as double) as value from src order by key) a
 INSERT OVERWRITE TABLE e1

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/multi_insert_gby3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/multi_insert_gby3.q.out b/ql/src/test/results/clientpositive/multi_insert_gby3.q.out
index 610b15f..d004e64 100644
--- a/ql/src/test/results/clientpositive/multi_insert_gby3.q.out
+++ b/ql/src/test/results/clientpositive/multi_insert_gby3.q.out
@@ -111,7 +111,8 @@ STAGE PLANS:
               name: default.e1
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-1
     Move Operator
@@ -124,7 +125,8 @@ STAGE PLANS:
               name: default.e2
 
   Stage: Stage-4
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: explain
 FROM (select key, cast(key as double) as keyD, value from src order by key) a
@@ -215,7 +217,8 @@ STAGE PLANS:
               name: default.e1
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-1
     Move Operator
@@ -228,7 +231,8 @@ STAGE PLANS:
               name: default.e2
 
   Stage: Stage-4
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM (select key, cast(key as double) as keyD, value from src order by key) a
 INSERT OVERWRITE TABLE e1
@@ -1635,7 +1639,8 @@ STAGE PLANS:
               name: default.e1
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-4
     Map Reduce
@@ -1677,7 +1682,8 @@ STAGE PLANS:
               name: default.e3
 
   Stage: Stage-5
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: explain
 FROM (select key, cast(key as double) as keyD, value from src order by key) a
@@ -1787,7 +1793,8 @@ STAGE PLANS:
               name: default.e1
 
   Stage: Stage-4
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-1
     Move Operator
@@ -1800,7 +1807,8 @@ STAGE PLANS:
               name: default.e2
 
   Stage: Stage-5
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-6
     Map Reduce
@@ -1842,5 +1850,6 @@ STAGE PLANS:
               name: default.e3
 
   Stage: Stage-7
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/multi_insert_gby4.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/multi_insert_gby4.q.out b/ql/src/test/results/clientpositive/multi_insert_gby4.q.out
index 2f59665..2b464d3 100644
--- a/ql/src/test/results/clientpositive/multi_insert_gby4.q.out
+++ b/ql/src/test/results/clientpositive/multi_insert_gby4.q.out
@@ -146,7 +146,8 @@ STAGE PLANS:
               name: default.e1
 
   Stage: Stage-4
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-1
     Move Operator
@@ -159,7 +160,8 @@ STAGE PLANS:
               name: default.e2
 
   Stage: Stage-5
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-2
     Move Operator
@@ -172,7 +174,8 @@ STAGE PLANS:
               name: default.e3
 
   Stage: Stage-6
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM (SELECT key, value FROM src) a
 INSERT OVERWRITE TABLE e1

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/multi_insert_mixed.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/multi_insert_mixed.q.out b/ql/src/test/results/clientpositive/multi_insert_mixed.q.out
index 9acae2e..05c030e 100644
--- a/ql/src/test/results/clientpositive/multi_insert_mixed.q.out
+++ b/ql/src/test/results/clientpositive/multi_insert_mixed.q.out
@@ -148,7 +148,8 @@ STAGE PLANS:
               name: default.src_multi1
 
   Stage: Stage-5
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-6
     Map Reduce
@@ -208,7 +209,8 @@ STAGE PLANS:
               name: default.src_multi2
 
   Stage: Stage-8
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-2
     Move Operator
@@ -221,7 +223,8 @@ STAGE PLANS:
               name: default.src_multi3
 
   Stage: Stage-9
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: from src
 insert overwrite table src_multi1 select key, count(1) group by key order by key


[22/22] hive git commit: HIVE-16827 : Merge stats task and column stats task into a single task (Zoltan Haindrich via Ashutosh Chauhan)

Posted by ha...@apache.org.
HIVE-16827 : Merge stats task and column stats task into a single task (Zoltan Haindrich via Ashutosh Chauhan)

Signed-off-by: Ashutosh Chauhan <ha...@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/ec9cc0bc
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/ec9cc0bc
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/ec9cc0bc

Branch: refs/heads/master
Commit: ec9cc0bc2967daa42a1061cc0dfc297afe223a5e
Parents: 9454042
Author: Zoltan Haindrich <ki...@rxd.hu>
Authored: Wed Sep 13 06:02:00 2017 -0700
Committer: Ashutosh Chauhan <ha...@apache.org>
Committed: Tue Nov 7 14:25:19 2017 -0800

----------------------------------------------------------------------
 .../accumulo_single_sourced_multi_insert.q.out  |    3 +-
 .../hadoop/hive/common/jsonexplain/Vertex.java  |    3 +-
 .../clientpositive/serde_typedbytes.q.out       |    3 +-
 .../clientpositive/serde_typedbytes2.q.out      |    3 +-
 .../clientpositive/serde_typedbytes3.q.out      |    3 +-
 .../clientpositive/serde_typedbytes4.q.out      |    3 +-
 .../clientpositive/serde_typedbytes5.q.out      |    3 +-
 data/scripts/q_test_init_src.sql                |    5 +
 .../hbase_single_sourced_multi_insert.q.out     |    3 +-
 .../test/results/clientpositive/explain.q.out   |    3 +-
 .../insert_into_dynamic_partitions.q.out        |    5 +-
 .../clientpositive/insert_into_table.q.out      |    5 +-
 .../insert_overwrite_dynamic_partitions.q.out   |    5 +-
 .../clientpositive/insert_overwrite_table.q.out |    5 +-
 .../write_final_output_blobstore.q.out          |    8 +-
 .../apache/hadoop/hive/ql/TestMTQueries.java    |    1 +
 .../apache/hive/jdbc/TestJdbcWithMiniHS2.java   |    1 +
 .../test/resources/testconfiguration.properties |    5 +-
 .../hadoop/hive/metastore/HiveMetaStore.java    |   69 +-
 .../hadoop/hive/metastore/MetaStoreUtils.java   |   25 +-
 pom.xml                                         |    1 +
 .../apache/hadoop/hive/ql/DriverContext.java    |    8 +-
 .../hadoop/hive/ql/exec/ColumnStatsTask.java    |  451 -----
 .../hadoop/hive/ql/exec/FileSinkOperator.java   |    5 +-
 .../hadoop/hive/ql/exec/StatsNoJobTask.java     |  396 -----
 .../apache/hadoop/hive/ql/exec/StatsTask.java   |  502 +-----
 .../org/apache/hadoop/hive/ql/exec/Task.java    |    2 +
 .../apache/hadoop/hive/ql/exec/TaskFactory.java |    9 +-
 .../apache/hadoop/hive/ql/exec/Utilities.java   |   48 +-
 .../apache/hadoop/hive/ql/metadata/Hive.java    |   12 +-
 .../ql/metadata/SessionHiveMetaStoreClient.java |   22 +-
 .../hive/ql/optimizer/GenMRTableScan1.java      |   49 +-
 .../hive/ql/optimizer/GenMapRedUtils.java       |   60 +-
 .../ql/optimizer/QueryPlanPostProcessor.java    |   52 +-
 .../ql/optimizer/physical/MemoryDecider.java    |   10 +-
 .../ql/optimizer/physical/SerializeFilter.java  |    2 +-
 .../ql/optimizer/physical/SkewJoinResolver.java |   11 +-
 .../hive/ql/parse/BaseSemanticAnalyzer.java     |   20 +-
 .../hive/ql/parse/DDLSemanticAnalyzer.java      |   37 +-
 .../hive/ql/parse/ExplainSemanticAnalyzer.java  |    6 -
 .../hadoop/hive/ql/parse/GenTezUtils.java       |    4 -
 .../hive/ql/parse/ImportSemanticAnalyzer.java   |    4 +-
 .../hive/ql/parse/LoadSemanticAnalyzer.java     |   21 +-
 .../hive/ql/parse/ProcessAnalyzeTable.java      |   96 +-
 .../hadoop/hive/ql/parse/SemanticAnalyzer.java  |    8 +-
 .../hadoop/hive/ql/parse/TaskCompiler.java      |  169 +-
 .../parse/spark/SparkProcessAnalyzeTable.java   |   38 +-
 .../hive/ql/plan/BasicStatsNoJobWork.java       |   63 +
 .../hadoop/hive/ql/plan/BasicStatsWork.java     |  196 +++
 .../hadoop/hive/ql/plan/ColumnStatsDesc.java    |   27 +-
 .../hadoop/hive/ql/plan/ColumnStatsWork.java    |   97 --
 .../hadoop/hive/ql/plan/FileSinkDesc.java       |    8 +-
 .../hadoop/hive/ql/plan/IStatsGatherDesc.java   |   28 +
 .../hadoop/hive/ql/plan/LoadFileDesc.java       |   24 +-
 .../hadoop/hive/ql/plan/StatsNoJobWork.java     |   70 -
 .../apache/hadoop/hive/ql/plan/StatsWork.java   |  171 +-
 .../hadoop/hive/ql/plan/TableScanDesc.java      |    5 +-
 .../hive/ql/stats/BasicStatsNoJobTask.java      |  385 +++++
 .../hadoop/hive/ql/stats/BasicStatsTask.java    |  499 ++++++
 .../hadoop/hive/ql/stats/ColStatsProcessor.java |  188 +++
 .../ql/stats/ColumnStatisticsObjTranslator.java |  293 ++++
 .../hadoop/hive/ql/stats/IStatsProcessor.java   |   36 +
 .../apache/hadoop/hive/ql/stats/Partish.java    |  186 +++
 .../apache/hadoop/hive/ql/TestTxnCommands.java  |    2 +-
 .../apache/hadoop/hive/ql/TestTxnCommands2.java |    1 +
 .../hadoop/hive/ql/TxnCommandsBaseForTests.java |    1 +
 .../TestHiveReduceExpressionsWithStatsRule.java |    2 +-
 .../queries/clientpositive/autoColumnStats_1.q  |    4 +
 .../queries/clientpositive/autoColumnStats_10.q |   52 +
 .../queries/clientpositive/autoColumnStats_5a.q |   30 +
 .../queries/clientpositive/basicstat_partval.q  |   12 +
 .../clientpositive/columnstats_partlvl.q        |    2 +
 .../clientpositive/columnstats_partlvl_dp.q     |    2 +
 .../test/queries/clientpositive/deleteAnalyze.q |    2 +
 .../clientpositive/exec_parallel_column_stats.q |    6 +-
 .../clientpositive/outer_reference_windowed.q   |    2 +
 .../test/queries/clientpositive/smb_mapjoin_1.q |    3 +
 .../temp_table_display_colstats_tbllvl.q        |    4 +
 .../clientpositive/acid_table_stats.q.out       |   10 +
 .../clientpositive/alterColumnStatsPart.q.out   |   12 +
 .../alter_partition_update_status.q.out         |    8 +
 .../alter_table_column_stats.q.out              |   16 +
 .../alter_table_update_status.q.out             |    8 +-
 ..._table_update_status_disable_bitvector.q.out |    8 +-
 .../clientpositive/analyze_tbl_date.q.out       |    2 +
 .../clientpositive/analyze_tbl_part.q.out       |   20 +
 .../annotate_stats_deep_filters.q.out           |    2 +
 .../clientpositive/annotate_stats_filter.q.out  |    2 +
 .../clientpositive/annotate_stats_groupby.q.out |    4 +
 .../annotate_stats_groupby2.q.out               |    2 +
 .../clientpositive/annotate_stats_join.q.out    |    6 +
 .../annotate_stats_join_pkfk.q.out              |    8 +
 .../clientpositive/annotate_stats_limit.q.out   |    2 +
 .../clientpositive/annotate_stats_part.q.out    |    4 +
 .../clientpositive/annotate_stats_select.q.out  |    2 +
 .../clientpositive/annotate_stats_table.q.out   |    4 +
 .../clientpositive/annotate_stats_union.q.out   |    6 +
 .../clientpositive/autoColumnStats_1.q.out      | 1433 ++++++++++++++++
 .../clientpositive/autoColumnStats_10.q.out     |  516 ++++++
 .../clientpositive/autoColumnStats_2.q.out      | 1557 ++++++++++++++++++
 .../clientpositive/autoColumnStats_3.q.out      |   10 +-
 .../clientpositive/autoColumnStats_4.q.out      |   11 +-
 .../clientpositive/autoColumnStats_5.q.out      |   37 +-
 .../clientpositive/autoColumnStats_5a.q.out     |  997 +++++++++++
 .../clientpositive/autoColumnStats_6.q.out      |    7 +-
 .../clientpositive/autoColumnStats_7.q.out      |    9 +-
 .../clientpositive/autoColumnStats_8.q.out      |   31 +-
 .../clientpositive/autoColumnStats_9.q.out      |    9 +-
 .../results/clientpositive/auto_join1.q.out     |    3 +-
 .../results/clientpositive/auto_join14.q.out    |    3 +-
 .../results/clientpositive/auto_join17.q.out    |    3 +-
 .../results/clientpositive/auto_join19.q.out    |    3 +-
 .../clientpositive/auto_join19_inclause.q.out   |    3 +-
 .../results/clientpositive/auto_join2.q.out     |    3 +-
 .../results/clientpositive/auto_join26.q.out    |    3 +-
 .../results/clientpositive/auto_join3.q.out     |    3 +-
 .../results/clientpositive/auto_join4.q.out     |    3 +-
 .../results/clientpositive/auto_join5.q.out     |    3 +-
 .../results/clientpositive/auto_join6.q.out     |    3 +-
 .../results/clientpositive/auto_join7.q.out     |    3 +-
 .../results/clientpositive/auto_join8.q.out     |    3 +-
 .../results/clientpositive/auto_join9.q.out     |    3 +-
 .../clientpositive/auto_sortmerge_join_13.q.out |   18 +-
 .../results/clientpositive/avro_decimal.q.out   |    4 +-
 .../clientpositive/avro_decimal_native.q.out    |    4 +-
 .../clientpositive/basicstat_partval.q.out      |  132 ++
 .../beeline/colstats_all_nulls.q.out            |    2 +
 .../clientpositive/beeline/smb_mapjoin_1.q.out  |   45 +
 .../clientpositive/beeline/smb_mapjoin_11.q.out |    3 +-
 .../clientpositive/beeline/smb_mapjoin_12.q.out |    6 +-
 .../clientpositive/beeline/smb_mapjoin_7.q.out  |    3 +-
 .../clientpositive/binary_output_format.q.out   |    3 +-
 .../test/results/clientpositive/bucket1.q.out   |    3 +-
 .../test/results/clientpositive/bucket2.q.out   |    3 +-
 .../test/results/clientpositive/bucket3.q.out   |    3 +-
 .../clientpositive/bucket_map_join_spark1.q.out |    6 +-
 .../clientpositive/bucket_map_join_spark2.q.out |    6 +-
 .../clientpositive/bucket_map_join_spark3.q.out |    6 +-
 .../results/clientpositive/bucketmapjoin5.q.out |    6 +-
 .../clientpositive/bucketmapjoin_negative.q.out |    3 +-
 .../bucketmapjoin_negative2.q.out               |    3 +-
 .../bucketsortoptimize_insert_1.q.out           |   15 +-
 .../bucketsortoptimize_insert_3.q.out           |    6 +-
 .../bucketsortoptimize_insert_4.q.out           |    6 +-
 .../bucketsortoptimize_insert_5.q.out           |    6 +-
 .../bucketsortoptimize_insert_8.q.out           |    6 +-
 .../clientpositive/case_sensitivity.q.out       |    3 +-
 ql/src/test/results/clientpositive/cast1.q.out  |    3 +-
 .../cbo_rp_annotate_stats_groupby.q.out         |    4 +
 .../clientpositive/cbo_rp_auto_join1.q.out      |    4 +
 .../clientpositive/cbo_rp_auto_join17.q.out     |    3 +-
 .../cbo_rp_gby2_map_multi_distinct.q.out        |    6 +-
 .../cbo_rp_groupby3_noskew_multi_distinct.q.out |    3 +-
 .../clientpositive/colstats_all_nulls.q.out     |    2 +
 .../columnStatsUpdateForStatsOptimizer_2.q.out  |    2 +
 .../column_pruner_multiple_children.q.out       |    9 +-
 .../columnarserde_create_shortcut.q.out         |    3 +-
 .../clientpositive/columnstats_infinity.q.out   |    4 +-
 .../clientpositive/columnstats_partlvl.q.out    |  133 +-
 .../clientpositive/columnstats_partlvl_dp.q.out |  145 +-
 .../clientpositive/columnstats_quoting.q.out    |   10 +-
 .../clientpositive/columnstats_tbllvl.q.out     |   63 +-
 .../results/clientpositive/compustat_avro.q.out |    2 +
 .../clientpositive/compute_stats_date.q.out     |    9 +-
 .../test/results/clientpositive/constGby.q.out  |    2 +
 .../clientpositive/constant_prop_2.q.out        |    3 +-
 .../clientpositive/constant_prop_3.q.out        |    6 +
 .../results/clientpositive/constprog_dp.q.out   |    3 +-
 .../results/clientpositive/constprog_type.q.out |    3 +-
 .../clientpositive/correlated_join_keys.q.out   |    2 +
 .../clientpositive/correlationoptimizer5.q.out  |    9 +-
 ql/src/test/results/clientpositive/cp_sel.q.out |    3 +-
 ql/src/test/results/clientpositive/ctas.q.out   |   15 +-
 .../results/clientpositive/ctas_colname.q.out   |   21 +-
 .../ctas_uses_database_location.q.out           |    3 +-
 .../results/clientpositive/decimal_stats.q.out  |    2 +
 .../results/clientpositive/deleteAnalyze.q.out  |   56 +-
 .../display_colstats_tbllvl.q.out               |   35 +-
 .../results/clientpositive/distinct_stats.q.out |    2 +
 .../clientpositive/drop_table_with_stats.q.out  |   12 +
 .../dynpart_sort_optimization_acid2.q.out       |    3 +-
 .../encryption_join_unencrypted_tbl.q.out       |   18 +-
 .../encrypted/encryption_move_tbl.q.out         |    2 +
 .../exec_parallel_column_stats.q.out            |   33 +-
 .../results/clientpositive/explain_ddl.q.out    |   15 +-
 .../extrapolate_part_stats_date.q.out           |    8 +
 .../extrapolate_part_stats_full.q.out           |   24 +
 .../extrapolate_part_stats_partial.q.out        |   24 +
 .../test/results/clientpositive/fm-sketch.q.out |   16 +-
 .../test/results/clientpositive/groupby1.q.out  |    3 +-
 .../test/results/clientpositive/groupby10.q.out |   18 +-
 .../test/results/clientpositive/groupby11.q.out |    6 +-
 .../test/results/clientpositive/groupby12.q.out |    3 +-
 .../results/clientpositive/groupby1_limit.q.out |    3 +-
 .../results/clientpositive/groupby1_map.q.out   |    3 +-
 .../clientpositive/groupby1_map_nomap.q.out     |    3 +-
 .../clientpositive/groupby1_map_skew.q.out      |    3 +-
 .../clientpositive/groupby1_noskew.q.out        |    3 +-
 .../results/clientpositive/groupby2_map.q.out   |    3 +-
 .../groupby2_map_multi_distinct.q.out           |    6 +-
 .../clientpositive/groupby2_map_skew.q.out      |    3 +-
 .../clientpositive/groupby2_noskew.q.out        |    3 +-
 .../groupby2_noskew_multi_distinct.q.out        |    3 +-
 .../test/results/clientpositive/groupby3.q.out  |    3 +-
 .../results/clientpositive/groupby3_map.q.out   |    3 +-
 .../groupby3_map_multi_distinct.q.out           |    3 +-
 .../clientpositive/groupby3_map_skew.q.out      |    3 +-
 .../clientpositive/groupby3_noskew.q.out        |    3 +-
 .../groupby3_noskew_multi_distinct.q.out        |    3 +-
 .../test/results/clientpositive/groupby4.q.out  |    3 +-
 .../results/clientpositive/groupby4_map.q.out   |    3 +-
 .../clientpositive/groupby4_map_skew.q.out      |    3 +-
 .../clientpositive/groupby4_noskew.q.out        |    3 +-
 .../test/results/clientpositive/groupby5.q.out  |    3 +-
 .../results/clientpositive/groupby5_map.q.out   |    3 +-
 .../clientpositive/groupby5_map_skew.q.out      |    3 +-
 .../clientpositive/groupby5_noskew.q.out        |    3 +-
 .../test/results/clientpositive/groupby6.q.out  |    3 +-
 .../results/clientpositive/groupby6_map.q.out   |    3 +-
 .../clientpositive/groupby6_map_skew.q.out      |    3 +-
 .../clientpositive/groupby6_noskew.q.out        |    3 +-
 .../results/clientpositive/groupby7_map.q.out   |    6 +-
 .../groupby7_map_multi_single_reducer.q.out     |    6 +-
 .../clientpositive/groupby7_map_skew.q.out      |    6 +-
 .../clientpositive/groupby7_noskew.q.out        |    6 +-
 .../groupby7_noskew_multi_single_reducer.q.out  |    6 +-
 .../test/results/clientpositive/groupby8.q.out  |   12 +-
 .../results/clientpositive/groupby8_map.q.out   |    6 +-
 .../clientpositive/groupby8_map_skew.q.out      |    6 +-
 .../clientpositive/groupby8_noskew.q.out        |    6 +-
 .../test/results/clientpositive/groupby9.q.out  |   30 +-
 .../clientpositive/groupby_complex_types.q.out  |    9 +-
 ...pby_complex_types_multi_single_reducer.q.out |    6 +-
 .../results/clientpositive/groupby_cube1.q.out  |    6 +-
 .../clientpositive/groupby_cube_multi_gby.q.out |    6 +-
 .../clientpositive/groupby_duplicate_key.q.out  |    3 +-
 .../clientpositive/groupby_map_ppr.q.out        |    3 +-
 .../groupby_map_ppr_multi_distinct.q.out        |    3 +-
 .../groupby_multi_insert_common_distinct.q.out  |    6 +-
 .../groupby_multi_single_reducer.q.out          |   24 +-
 .../groupby_multi_single_reducer2.q.out         |    6 +-
 .../groupby_multi_single_reducer3.q.out         |   24 +-
 .../clientpositive/groupby_position.q.out       |   12 +-
 .../results/clientpositive/groupby_ppr.q.out    |    3 +-
 .../groupby_ppr_multi_distinct.q.out            |    6 +-
 .../clientpositive/groupby_rollup1.q.out        |    6 +-
 .../clientpositive/groupby_sort_1_23.q.out      |   60 +-
 .../results/clientpositive/groupby_sort_2.q.out |    3 +-
 .../results/clientpositive/groupby_sort_3.q.out |    6 +-
 .../results/clientpositive/groupby_sort_4.q.out |    6 +-
 .../results/clientpositive/groupby_sort_5.q.out |    9 +-
 .../results/clientpositive/groupby_sort_6.q.out |    9 +-
 .../results/clientpositive/groupby_sort_7.q.out |    3 +-
 .../clientpositive/groupby_sort_skew_1_23.q.out |   60 +-
 .../clientpositive/groupby_sort_test_1.q.out    |    3 +-
 ql/src/test/results/clientpositive/hll.q.out    |   16 +-
 .../implicit_cast_during_insert.q.out           |    3 +-
 .../clientpositive/index_auto_update.q.out      |    6 +-
 .../infer_bucket_sort_dyn_part.q.out            |    3 +-
 .../infer_bucket_sort_grouping_operators.q.out  |    9 +-
 .../infer_bucket_sort_map_operators.q.out       |   12 +-
 .../infer_bucket_sort_num_buckets.q.out         |    3 +-
 .../test/results/clientpositive/innerjoin.q.out |    3 +-
 .../test/results/clientpositive/input11.q.out   |    3 +-
 .../results/clientpositive/input11_limit.q.out  |    3 +-
 .../test/results/clientpositive/input12.q.out   |    9 +-
 .../test/results/clientpositive/input13.q.out   |    9 +-
 .../test/results/clientpositive/input14.q.out   |    3 +-
 .../results/clientpositive/input14_limit.q.out  |    3 +-
 .../test/results/clientpositive/input17.q.out   |    3 +-
 .../test/results/clientpositive/input18.q.out   |    3 +-
 .../results/clientpositive/input1_limit.q.out   |    6 +-
 .../test/results/clientpositive/input20.q.out   |    3 +-
 .../test/results/clientpositive/input30.q.out   |    6 +-
 .../test/results/clientpositive/input31.q.out   |    3 +-
 .../test/results/clientpositive/input32.q.out   |    3 +-
 .../test/results/clientpositive/input33.q.out   |    3 +-
 .../test/results/clientpositive/input34.q.out   |    3 +-
 .../test/results/clientpositive/input35.q.out   |    3 +-
 .../test/results/clientpositive/input36.q.out   |    3 +-
 .../test/results/clientpositive/input38.q.out   |    3 +-
 .../results/clientpositive/input3_limit.q.out   |    3 +-
 ql/src/test/results/clientpositive/input4.q.out |    3 +-
 ql/src/test/results/clientpositive/input5.q.out |    3 +-
 ql/src/test/results/clientpositive/input6.q.out |    3 +-
 ql/src/test/results/clientpositive/input7.q.out |    3 +-
 ql/src/test/results/clientpositive/input8.q.out |    3 +-
 ql/src/test/results/clientpositive/input9.q.out |    3 +-
 .../clientpositive/input_columnarserde.q.out    |    3 +-
 .../clientpositive/input_dynamicserde.q.out     |    3 +-
 .../clientpositive/input_lazyserde.q.out        |    3 +-
 .../clientpositive/input_lazyserde2.q.out       |    3 +-
 .../results/clientpositive/input_part1.q.out    |    3 +-
 .../results/clientpositive/input_part10.q.out   |    3 +-
 .../results/clientpositive/input_part2.q.out    |    6 +-
 .../results/clientpositive/input_part5.q.out    |    3 +-
 .../clientpositive/input_testsequencefile.q.out |    3 +-
 .../clientpositive/input_testxpath.q.out        |    3 +-
 .../clientpositive/input_testxpath2.q.out       |    3 +-
 .../test/results/clientpositive/insert1.q.out   |   18 +-
 .../insert1_overwrite_partitions.q.out          |    6 +-
 .../insert2_overwrite_partitions.q.out          |    6 +-
 .../results/clientpositive/insert_into1.q.out   |   15 +-
 .../results/clientpositive/insert_into2.q.out   |    9 +-
 .../results/clientpositive/insert_into3.q.out   |   12 +-
 .../results/clientpositive/insert_into4.q.out   |    9 +-
 .../results/clientpositive/insert_into5.q.out   |   12 +-
 .../results/clientpositive/insert_into6.q.out   |    6 +-
 .../insert_values_orig_table_use_metadata.q.out |    5 +
 .../clientpositive/insertoverwrite_bucket.q.out |    6 +-
 ql/src/test/results/clientpositive/join14.q.out |    3 +-
 ql/src/test/results/clientpositive/join17.q.out |    3 +-
 ql/src/test/results/clientpositive/join2.q.out  |    3 +-
 ql/src/test/results/clientpositive/join25.q.out |    3 +-
 ql/src/test/results/clientpositive/join26.q.out |    3 +-
 ql/src/test/results/clientpositive/join27.q.out |    3 +-
 ql/src/test/results/clientpositive/join28.q.out |    3 +-
 ql/src/test/results/clientpositive/join29.q.out |    3 +-
 ql/src/test/results/clientpositive/join3.q.out  |    3 +-
 ql/src/test/results/clientpositive/join30.q.out |    3 +-
 ql/src/test/results/clientpositive/join31.q.out |    3 +-
 ql/src/test/results/clientpositive/join32.q.out |    3 +-
 ql/src/test/results/clientpositive/join33.q.out |    3 +-
 ql/src/test/results/clientpositive/join34.q.out |    3 +-
 ql/src/test/results/clientpositive/join35.q.out |    3 +-
 ql/src/test/results/clientpositive/join36.q.out |    3 +-
 ql/src/test/results/clientpositive/join37.q.out |    3 +-
 ql/src/test/results/clientpositive/join39.q.out |    3 +-
 ql/src/test/results/clientpositive/join4.q.out  |    3 +-
 ql/src/test/results/clientpositive/join5.q.out  |    3 +-
 ql/src/test/results/clientpositive/join6.q.out  |    3 +-
 ql/src/test/results/clientpositive/join7.q.out  |    3 +-
 ql/src/test/results/clientpositive/join8.q.out  |    3 +-
 ql/src/test/results/clientpositive/join9.q.out  |    3 +-
 .../results/clientpositive/join_map_ppr.q.out   |    6 +-
 .../limit_pushdown_negative.q.out               |    6 +-
 .../test/results/clientpositive/lineage1.q.out  |    3 +-
 .../clientpositive/list_bucket_dml_1.q.out      |    3 +-
 .../clientpositive/list_bucket_dml_11.q.out     |    3 +-
 .../clientpositive/list_bucket_dml_12.q.out     |    3 +-
 .../clientpositive/list_bucket_dml_13.q.out     |    3 +-
 .../clientpositive/list_bucket_dml_14.q.out     |    3 +-
 .../clientpositive/list_bucket_dml_2.q.out      |    3 +-
 .../clientpositive/list_bucket_dml_3.q.out      |    3 +-
 .../clientpositive/list_bucket_dml_4.q.out      |    6 +-
 .../clientpositive/list_bucket_dml_5.q.out      |    3 +-
 .../clientpositive/list_bucket_dml_6.q.out      |    6 +-
 .../clientpositive/list_bucket_dml_7.q.out      |    6 +-
 .../clientpositive/list_bucket_dml_8.q.out      |    3 +-
 .../clientpositive/list_bucket_dml_9.q.out      |    6 +-
 .../llap/acid_bucket_pruning.q.out              |    2 +
 .../clientpositive/llap/acid_no_buckets.q.out   |   12 +-
 .../llap/acid_vectorization_original.q.out      |    3 +-
 .../clientpositive/llap/autoColumnStats_1.q.out |   74 +
 .../llap/autoColumnStats_10.q.out               |  516 ++++++
 .../clientpositive/llap/autoColumnStats_2.q.out |    1 -
 .../clientpositive/llap/auto_join1.q.out        |    3 +-
 .../llap/auto_smb_mapjoin_14.q.out              |   12 +-
 .../llap/auto_sortmerge_join_13.q.out           |   18 +-
 .../results/clientpositive/llap/bucket2.q.out   |    3 +-
 .../results/clientpositive/llap/bucket3.q.out   |    3 +-
 .../results/clientpositive/llap/bucket4.q.out   |    3 +-
 .../results/clientpositive/llap/bucket5.q.out   |    6 +-
 .../results/clientpositive/llap/bucket6.q.out   |    3 +-
 .../clientpositive/llap/bucket_many.q.out       |    3 +-
 .../clientpositive/llap/bucketmapjoin1.q.out    |    6 +-
 .../clientpositive/llap/bucketmapjoin2.q.out    |    9 +-
 .../clientpositive/llap/bucketmapjoin3.q.out    |    6 +-
 .../clientpositive/llap/bucketmapjoin4.q.out    |    6 +-
 .../llap/bucketsortoptimize_insert_2.q.out      |   18 +-
 .../llap/bucketsortoptimize_insert_6.q.out      |   21 +-
 .../llap/bucketsortoptimize_insert_7.q.out      |    9 +-
 .../llap/column_table_stats.q.out               |   36 +-
 .../llap/column_table_stats_orc.q.out           |   21 +-
 .../test/results/clientpositive/llap/ctas.q.out |   15 +-
 .../clientpositive/llap/deleteAnalyze.q.out     |   48 +-
 .../llap/disable_merge_for_bucketing.q.out      |    3 +-
 .../llap/dynamic_partition_pruning.q.out        |    3 +-
 .../llap/dynamic_semijoin_reduction_3.q.out     |   27 +-
 .../llap/dynpart_sort_opt_vectorization.q.out   |   45 +-
 .../llap/dynpart_sort_optimization.q.out        |   66 +-
 .../llap/dynpart_sort_optimization2.q.out       |   24 +-
 .../llap/dynpart_sort_optimization_acid.q.out   |   36 +-
 .../clientpositive/llap/explainuser_1.q.out     |   14 +-
 .../clientpositive/llap/explainuser_2.q.out     |   24 +-
 .../results/clientpositive/llap/groupby1.q.out  |    3 +-
 .../results/clientpositive/llap/groupby2.q.out  |    3 +-
 .../results/clientpositive/llap/groupby3.q.out  |    3 +-
 .../results/clientpositive/llap/insert1.q.out   |   18 +-
 .../clientpositive/llap/insert_into1.q.out      |   15 +-
 .../clientpositive/llap/insert_into2.q.out      |    9 +-
 .../results/clientpositive/llap/join1.q.out     |    3 +-
 .../clientpositive/llap/join32_lessSize.q.out   |   18 +-
 .../llap/list_bucket_dml_10.q.out               |    3 +-
 .../clientpositive/llap/llap_stats.q.out        |    7 +-
 .../clientpositive/llap/load_dyn_part1.q.out    |    6 +-
 .../clientpositive/llap/load_dyn_part2.q.out    |    3 +-
 .../clientpositive/llap/load_dyn_part3.q.out    |    3 +-
 .../clientpositive/llap/load_dyn_part5.q.out    |    3 +-
 .../clientpositive/llap/mapreduce1.q.out        |    3 +-
 .../clientpositive/llap/mapreduce2.q.out        |    3 +-
 .../results/clientpositive/llap/merge1.q.out    |    9 +-
 .../results/clientpositive/llap/merge2.q.out    |    9 +-
 .../results/clientpositive/llap/mm_all.q.out    |    7 +-
 .../clientpositive/llap/multi_insert.q.out      |   72 +-
 .../llap/multi_insert_lateral_view.q.out        |   42 +-
 .../clientpositive/llap/orc_merge1.q.out        |    9 +-
 .../clientpositive/llap/orc_merge10.q.out       |   12 +-
 .../clientpositive/llap/orc_merge2.q.out        |    3 +-
 .../clientpositive/llap/orc_merge3.q.out        |    3 +-
 .../clientpositive/llap/orc_merge4.q.out        |    3 +-
 .../clientpositive/llap/orc_merge5.q.out        |    9 +-
 .../clientpositive/llap/orc_merge6.q.out        |    9 +-
 .../clientpositive/llap/orc_merge7.q.out        |    9 +-
 .../clientpositive/llap/orc_merge_diff_fs.q.out |    9 +-
 .../llap/orc_merge_incompat1.q.out              |    3 +-
 .../llap/orc_merge_incompat2.q.out              |    6 +-
 .../results/clientpositive/llap/parallel.q.out  |    6 +-
 .../clientpositive/llap/parallel_colstats.q.out |   22 +-
 .../test/results/clientpositive/llap/ptf.q.out  |    6 +-
 .../clientpositive/llap/rcfile_createas1.q.out  |    3 +-
 .../clientpositive/llap/rcfile_merge2.q.out     |    3 +-
 .../clientpositive/llap/rcfile_merge3.q.out     |    3 +-
 .../clientpositive/llap/rcfile_merge4.q.out     |    3 +-
 .../llap/reduce_deduplicate.q.out               |    6 +-
 .../results/clientpositive/llap/sample1.q.out   |    3 +-
 .../results/clientpositive/llap/skewjoin.q.out  |    3 +-
 .../clientpositive/llap/smb_mapjoin_18.q.out    |    9 +-
 .../clientpositive/llap/smb_mapjoin_19.q.out    |    3 +-
 .../clientpositive/llap/smb_mapjoin_6.q.out     |   12 +-
 .../results/clientpositive/llap/sqlmerge.q.out  |   15 +-
 .../results/clientpositive/llap/stats11.q.out   |    9 +-
 .../clientpositive/llap/stats_noscan_1.q.out    |    6 +-
 .../clientpositive/llap/temp_table.q.out        |    6 +-
 .../results/clientpositive/llap/tez_dml.q.out   |   12 +-
 .../llap/tez_join_result_complex.q.out          |    6 +-
 .../clientpositive/llap/tez_nway_join.q.out     |   90 +-
 .../llap/tez_union_dynamic_partition.q.out      |    3 +-
 .../llap/tez_union_dynamic_partition_2.q.out    |    3 +-
 .../llap/tez_union_multiinsert.q.out            |   30 +-
 .../results/clientpositive/llap/union4.q.out    |    3 +-
 .../results/clientpositive/llap/union6.q.out    |    3 +-
 .../clientpositive/llap/unionDistinct_1.q.out   |   69 +-
 .../clientpositive/llap/union_stats.q.out       |    3 +-
 .../clientpositive/llap/union_top_level.q.out   |    9 +-
 .../llap/vector_auto_smb_mapjoin_14.q.out       |   12 +-
 .../clientpositive/llap/vector_bucket.q.out     |    3 +-
 .../clientpositive/llap/vector_char_4.q.out     |    3 +-
 .../llap/vector_complex_all.q.out               |    3 +-
 .../clientpositive/llap/vector_decimal_6.q.out  |    3 +-
 .../clientpositive/llap/vector_groupby4.q.out   |    3 +-
 .../clientpositive/llap/vector_groupby6.q.out   |    3 +-
 .../llap/vector_groupby_cube1.q.out             |    6 +-
 .../llap/vector_groupby_rollup1.q.out           |    6 +-
 .../llap/vector_multi_insert.q.out              |    9 +-
 .../llap/vector_udf_character_length.q.out      |    3 +-
 .../llap/vector_udf_octet_length.q.out          |    3 +-
 .../clientpositive/llap/vector_varchar_4.q.out  |    3 +-
 .../llap/vector_varchar_simple.q.out            |    3 +-
 .../clientpositive/llap/vector_windowing.q.out  |    9 +-
 .../llap/vector_windowing_expressions.q.out     |    6 +-
 .../llap/vector_windowing_streaming.q.out       |    3 +-
 .../vectorized_dynamic_partition_pruning.q.out  |    3 +-
 .../clientpositive/llap/vectorized_ptf.q.out    |    6 +-
 .../results/clientpositive/load_dyn_part1.q.out |    6 +-
 .../clientpositive/load_dyn_part10.q.out        |    3 +-
 .../clientpositive/load_dyn_part13.q.out        |    3 +-
 .../clientpositive/load_dyn_part14.q.out        |    3 +-
 .../results/clientpositive/load_dyn_part2.q.out |    3 +-
 .../results/clientpositive/load_dyn_part3.q.out |    3 +-
 .../results/clientpositive/load_dyn_part4.q.out |    3 +-
 .../results/clientpositive/load_dyn_part8.q.out |    6 +-
 .../results/clientpositive/load_dyn_part9.q.out |    3 +-
 .../results/clientpositive/mapreduce1.q.out     |    3 +-
 .../results/clientpositive/mapreduce2.q.out     |    3 +-
 .../results/clientpositive/mapreduce3.q.out     |    3 +-
 .../results/clientpositive/mapreduce4.q.out     |    3 +-
 .../results/clientpositive/mapreduce5.q.out     |    3 +-
 .../results/clientpositive/mapreduce6.q.out     |    3 +-
 .../results/clientpositive/mapreduce7.q.out     |    3 +-
 .../results/clientpositive/mapreduce8.q.out     |    3 +-
 .../results/clientpositive/masking_11.q.out     |    2 +
 .../materialized_view_rewrite_ssb.q.out         |   10 +
 .../materialized_view_rewrite_ssb_2.q.out       |   10 +
 ql/src/test/results/clientpositive/merge1.q.out |    9 +-
 ql/src/test/results/clientpositive/merge2.q.out |    9 +-
 ql/src/test/results/clientpositive/merge3.q.out |    9 +-
 ql/src/test/results/clientpositive/merge4.q.out |    9 +-
 .../merge_dynamic_partition.q.out               |    9 +-
 .../merge_dynamic_partition2.q.out              |    3 +-
 .../merge_dynamic_partition3.q.out              |    3 +-
 .../merge_dynamic_partition4.q.out              |    3 +-
 .../merge_dynamic_partition5.q.out              |    3 +-
 .../clientpositive/metadata_only_queries.q.out  |   14 +
 .../metadata_only_queries_with_filters.q.out    |    8 +
 ql/src/test/results/clientpositive/mm_all.q.out |    7 +-
 .../clientpositive/multi_insert_gby.q.out       |   12 +-
 .../clientpositive/multi_insert_gby2.q.out      |    6 +-
 .../clientpositive/multi_insert_gby3.q.out      |   27 +-
 .../clientpositive/multi_insert_gby4.q.out      |    9 +-
 .../clientpositive/multi_insert_mixed.q.out     |    9 +-
 ...i_insert_move_tasks_share_dependencies.q.out |   96 +-
 .../clientpositive/multi_insert_union_src.q.out |    6 +-
 .../multi_insert_with_join2.q.out               |   42 +-
 .../clientpositive/multigroupby_singlemr.q.out  |   33 +-
 .../results/clientpositive/nonmr_fetch.q.out    |    6 +-
 .../nonreserved_keywords_insert_into1.q.out     |    9 +-
 .../results/clientpositive/notable_alias1.q.out |    3 +-
 .../results/clientpositive/notable_alias2.q.out |    3 +-
 .../results/clientpositive/nullformatCTAS.q.out |    3 +-
 .../optimize_filter_literal.q.out               |    8 +
 .../results/clientpositive/orc_createas1.q.out  |    6 +-
 .../results/clientpositive/orc_merge1.q.out     |    9 +-
 .../results/clientpositive/orc_merge10.q.out    |   12 +-
 .../results/clientpositive/orc_merge2.q.out     |    3 +-
 .../results/clientpositive/orc_merge3.q.out     |    3 +-
 .../results/clientpositive/orc_merge4.q.out     |    3 +-
 .../results/clientpositive/orc_merge5.q.out     |    9 +-
 .../results/clientpositive/orc_merge6.q.out     |    9 +-
 .../clientpositive/orc_merge_diff_fs.q.out      |    9 +-
 .../clientpositive/orc_merge_incompat1.q.out    |    3 +-
 .../clientpositive/orc_merge_incompat2.q.out    |    6 +-
 .../outer_reference_windowed.q.out              |  122 +-
 .../test/results/clientpositive/parallel.q.out  |    6 +-
 .../clientpositive/parallel_colstats.q.out      |   30 +-
 .../results/clientpositive/parallel_join1.q.out |    3 +-
 .../clientpositive/parallel_orderby.q.out       |    3 +-
 .../clientpositive/partial_column_stats.q.out   |    5 +-
 .../partition_coltype_literals.q.out            |    4 +
 ql/src/test/results/clientpositive/pcr.q.out    |   12 +-
 ql/src/test/results/clientpositive/pcs.q.out    |    8 +
 .../clientpositive/ppd_constant_expr.q.out      |    6 +-
 .../clientpositive/ppd_multi_insert.q.out       |   18 +-
 ql/src/test/results/clientpositive/quote1.q.out |    3 +-
 .../results/clientpositive/quotedid_stats.q.out |    2 +
 .../clientpositive/rand_partitionpruner2.q.out  |    3 +-
 .../clientpositive/rcfile_null_value.q.out      |    3 +-
 .../clientpositive/remove_exprs_stats.q.out     |    6 +
 .../rename_external_partition_location.q.out    |    4 +
 .../rename_table_update_column_stats.q.out      |    4 +
 .../test/results/clientpositive/sample1.q.out   |    3 +-
 .../test/results/clientpositive/sample2.q.out   |    3 +-
 .../test/results/clientpositive/sample4.q.out   |    3 +-
 .../test/results/clientpositive/sample5.q.out   |    3 +-
 .../test/results/clientpositive/sample6.q.out   |    3 +-
 .../test/results/clientpositive/sample7.q.out   |    3 +-
 .../test/results/clientpositive/skewjoin.q.out  |    3 +-
 .../clientpositive/skewjoin_noskew.q.out        |    3 +-
 .../clientpositive/skewjoin_onesideskew.q.out   |    3 +-
 .../results/clientpositive/smb_mapjoin9.q.out   |    3 +-
 .../results/clientpositive/smb_mapjoin_1.q.out  |   43 +
 .../results/clientpositive/smb_mapjoin_11.q.out |    3 +-
 .../results/clientpositive/smb_mapjoin_12.q.out |    6 +-
 .../results/clientpositive/smb_mapjoin_20.q.out |    9 +-
 .../results/clientpositive/smb_mapjoin_21.q.out |   18 +-
 .../results/clientpositive/smb_mapjoin_22.q.out |    6 +-
 .../results/clientpositive/smb_mapjoin_7.q.out  |    3 +-
 .../spark/annotate_stats_join.q.out             |    6 +
 .../clientpositive/spark/auto_join1.q.out       |    3 +-
 .../clientpositive/spark/auto_join14.q.out      |    3 +-
 .../clientpositive/spark/auto_join17.q.out      |    3 +-
 .../clientpositive/spark/auto_join19.q.out      |    3 +-
 .../clientpositive/spark/auto_join2.q.out       |    3 +-
 .../clientpositive/spark/auto_join26.q.out      |    3 +-
 .../clientpositive/spark/auto_join3.q.out       |    3 +-
 .../clientpositive/spark/auto_join4.q.out       |    3 +-
 .../clientpositive/spark/auto_join5.q.out       |    3 +-
 .../clientpositive/spark/auto_join6.q.out       |    3 +-
 .../clientpositive/spark/auto_join7.q.out       |    3 +-
 .../clientpositive/spark/auto_join8.q.out       |    3 +-
 .../clientpositive/spark/auto_join9.q.out       |    3 +-
 .../spark/auto_smb_mapjoin_14.q.out             |   12 +-
 .../spark/auto_sortmerge_join_13.q.out          |   18 +-
 .../spark/avro_decimal_native.q.out             |    4 +-
 .../results/clientpositive/spark/bucket2.q.out  |    3 +-
 .../results/clientpositive/spark/bucket3.q.out  |    3 +-
 .../results/clientpositive/spark/bucket4.q.out  |    3 +-
 .../results/clientpositive/spark/bucket5.q.out  |    6 +-
 .../results/clientpositive/spark/bucket6.q.out  |    3 +-
 .../spark/bucket_map_join_spark1.q.out          |    6 +-
 .../spark/bucket_map_join_spark2.q.out          |    6 +-
 .../spark/bucket_map_join_spark3.q.out          |    6 +-
 .../spark/bucket_map_join_tez1.q.out            |  104 +-
 .../spark/bucket_map_join_tez2.q.out            |   22 +
 .../clientpositive/spark/bucketmapjoin1.q.out   |    6 +-
 .../clientpositive/spark/bucketmapjoin2.q.out   |    9 +-
 .../clientpositive/spark/bucketmapjoin3.q.out   |    6 +-
 .../clientpositive/spark/bucketmapjoin4.q.out   |    6 +-
 .../clientpositive/spark/bucketmapjoin5.q.out   |    6 +-
 .../clientpositive/spark/bucketmapjoin7.q.out   |    4 -
 .../spark/bucketmapjoin_negative.q.out          |    3 +-
 .../spark/bucketmapjoin_negative2.q.out         |    3 +-
 .../spark/bucketsortoptimize_insert_2.q.out     |   18 +-
 .../spark/bucketsortoptimize_insert_4.q.out     |    6 +-
 .../spark/bucketsortoptimize_insert_6.q.out     |   21 +-
 .../spark/bucketsortoptimize_insert_7.q.out     |    9 +-
 .../spark/bucketsortoptimize_insert_8.q.out     |    6 +-
 .../results/clientpositive/spark/ctas.q.out     |   15 +-
 .../spark/disable_merge_for_bucketing.q.out     |    3 +-
 .../spark/dynamic_rdd_cache.q.out               |    9 +-
 .../results/clientpositive/spark/groupby1.q.out |    3 +-
 .../clientpositive/spark/groupby10.q.out        |   18 +-
 .../clientpositive/spark/groupby11.q.out        |    6 +-
 .../clientpositive/spark/groupby1_map.q.out     |    3 +-
 .../spark/groupby1_map_nomap.q.out              |    3 +-
 .../spark/groupby1_map_skew.q.out               |    3 +-
 .../clientpositive/spark/groupby1_noskew.q.out  |    3 +-
 .../results/clientpositive/spark/groupby2.q.out |    3 +-
 .../clientpositive/spark/groupby2_map.q.out     |    3 +-
 .../spark/groupby2_map_multi_distinct.q.out     |    6 +-
 .../spark/groupby2_map_skew.q.out               |    3 +-
 .../clientpositive/spark/groupby2_noskew.q.out  |    3 +-
 .../spark/groupby2_noskew_multi_distinct.q.out  |    3 +-
 .../results/clientpositive/spark/groupby3.q.out |    3 +-
 .../clientpositive/spark/groupby3_map.q.out     |    3 +-
 .../spark/groupby3_map_multi_distinct.q.out     |    3 +-
 .../spark/groupby3_map_skew.q.out               |    3 +-
 .../clientpositive/spark/groupby3_noskew.q.out  |    3 +-
 .../spark/groupby3_noskew_multi_distinct.q.out  |    3 +-
 .../results/clientpositive/spark/groupby4.q.out |    3 +-
 .../clientpositive/spark/groupby4_map.q.out     |    3 +-
 .../spark/groupby4_map_skew.q.out               |    3 +-
 .../clientpositive/spark/groupby4_noskew.q.out  |    3 +-
 .../results/clientpositive/spark/groupby5.q.out |    3 +-
 .../clientpositive/spark/groupby5_map.q.out     |    3 +-
 .../spark/groupby5_map_skew.q.out               |    3 +-
 .../clientpositive/spark/groupby5_noskew.q.out  |    3 +-
 .../results/clientpositive/spark/groupby6.q.out |    3 +-
 .../clientpositive/spark/groupby6_map.q.out     |    3 +-
 .../spark/groupby6_map_skew.q.out               |    3 +-
 .../clientpositive/spark/groupby6_noskew.q.out  |    3 +-
 .../clientpositive/spark/groupby7_map.q.out     |    6 +-
 .../groupby7_map_multi_single_reducer.q.out     |    6 +-
 .../spark/groupby7_map_skew.q.out               |    6 +-
 .../clientpositive/spark/groupby7_noskew.q.out  |    6 +-
 .../groupby7_noskew_multi_single_reducer.q.out  |    6 +-
 .../results/clientpositive/spark/groupby8.q.out |   12 +-
 .../clientpositive/spark/groupby8_map.q.out     |    6 +-
 .../spark/groupby8_map_skew.q.out               |    6 +-
 .../clientpositive/spark/groupby8_noskew.q.out  |    6 +-
 .../results/clientpositive/spark/groupby9.q.out |   30 +-
 .../spark/groupby_complex_types.q.out           |    9 +-
 ...pby_complex_types_multi_single_reducer.q.out |    6 +-
 .../clientpositive/spark/groupby_cube1.q.out    |    6 +-
 .../clientpositive/spark/groupby_map_ppr.q.out  |    3 +-
 .../spark/groupby_map_ppr_multi_distinct.q.out  |    3 +-
 .../groupby_multi_insert_common_distinct.q.out  |    6 +-
 .../spark/groupby_multi_single_reducer.q.out    |   24 +-
 .../spark/groupby_multi_single_reducer2.q.out   |    6 +-
 .../spark/groupby_multi_single_reducer3.q.out   |   24 +-
 .../clientpositive/spark/groupby_position.q.out |   12 +-
 .../clientpositive/spark/groupby_ppr.q.out      |    3 +-
 .../spark/groupby_ppr_multi_distinct.q.out      |    6 +-
 .../clientpositive/spark/groupby_rollup1.q.out  |    6 +-
 .../spark/groupby_sort_1_23.q.out               |   60 +-
 .../spark/groupby_sort_skew_1_23.q.out          |   60 +-
 .../spark/infer_bucket_sort_map_operators.q.out |   12 +-
 .../spark/infer_bucket_sort_num_buckets.q.out   |    3 +-
 .../clientpositive/spark/innerjoin.q.out        |    3 +-
 .../results/clientpositive/spark/input12.q.out  |    9 +-
 .../results/clientpositive/spark/input13.q.out  |    9 +-
 .../results/clientpositive/spark/input14.q.out  |    3 +-
 .../results/clientpositive/spark/input17.q.out  |    3 +-
 .../results/clientpositive/spark/input18.q.out  |    3 +-
 .../clientpositive/spark/input1_limit.q.out     |    6 +-
 .../clientpositive/spark/input_part2.q.out      |    6 +-
 .../results/clientpositive/spark/insert1.q.out  |   18 +-
 .../clientpositive/spark/insert_into1.q.out     |   15 +-
 .../clientpositive/spark/insert_into2.q.out     |    9 +-
 .../clientpositive/spark/insert_into3.q.out     |   12 +-
 .../results/clientpositive/spark/join1.q.out    |    3 +-
 .../results/clientpositive/spark/join14.q.out   |    3 +-
 .../results/clientpositive/spark/join17.q.out   |    3 +-
 .../results/clientpositive/spark/join2.q.out    |    3 +-
 .../results/clientpositive/spark/join25.q.out   |    3 +-
 .../results/clientpositive/spark/join26.q.out   |    3 +-
 .../results/clientpositive/spark/join27.q.out   |    3 +-
 .../results/clientpositive/spark/join28.q.out   |    3 +-
 .../results/clientpositive/spark/join29.q.out   |    3 +-
 .../results/clientpositive/spark/join3.q.out    |    3 +-
 .../results/clientpositive/spark/join30.q.out   |    3 +-
 .../results/clientpositive/spark/join31.q.out   |    3 +-
 .../results/clientpositive/spark/join32.q.out   |    3 +-
 .../clientpositive/spark/join32_lessSize.q.out  |   18 +-
 .../results/clientpositive/spark/join33.q.out   |    3 +-
 .../results/clientpositive/spark/join34.q.out   |    3 +-
 .../results/clientpositive/spark/join35.q.out   |    3 +-
 .../results/clientpositive/spark/join36.q.out   |    3 +-
 .../results/clientpositive/spark/join37.q.out   |    3 +-
 .../results/clientpositive/spark/join39.q.out   |    3 +-
 .../results/clientpositive/spark/join4.q.out    |    3 +-
 .../results/clientpositive/spark/join5.q.out    |    3 +-
 .../results/clientpositive/spark/join6.q.out    |    3 +-
 .../results/clientpositive/spark/join7.q.out    |    3 +-
 .../results/clientpositive/spark/join8.q.out    |    3 +-
 .../results/clientpositive/spark/join9.q.out    |    3 +-
 .../clientpositive/spark/join_map_ppr.q.out     |    6 +-
 .../spark/list_bucket_dml_10.q.out              |    3 +-
 .../spark/list_bucket_dml_2.q.out               |    3 +-
 .../clientpositive/spark/load_dyn_part1.q.out   |    6 +-
 .../clientpositive/spark/load_dyn_part10.q.out  |    3 +-
 .../clientpositive/spark/load_dyn_part13.q.out  |    3 +-
 .../clientpositive/spark/load_dyn_part14.q.out  |    3 +-
 .../clientpositive/spark/load_dyn_part2.q.out   |    3 +-
 .../clientpositive/spark/load_dyn_part3.q.out   |    3 +-
 .../clientpositive/spark/load_dyn_part4.q.out   |    3 +-
 .../clientpositive/spark/load_dyn_part5.q.out   |    3 +-
 .../clientpositive/spark/load_dyn_part8.q.out   |    6 +-
 .../clientpositive/spark/load_dyn_part9.q.out   |    3 +-
 .../clientpositive/spark/mapreduce1.q.out       |    3 +-
 .../clientpositive/spark/mapreduce2.q.out       |    3 +-
 .../results/clientpositive/spark/merge1.q.out   |    9 +-
 .../results/clientpositive/spark/merge2.q.out   |    9 +-
 .../spark/metadata_only_queries.q.out           |   14 +
 .../metadata_only_queries_with_filters.q.out    |    8 +
 .../clientpositive/spark/multi_insert.q.out     |   72 +-
 .../clientpositive/spark/multi_insert_gby.q.out |   12 +-
 .../spark/multi_insert_gby2.q.out               |    6 +-
 .../spark/multi_insert_gby3.q.out               |   27 +-
 .../spark/multi_insert_lateral_view.q.out       |   42 +-
 .../spark/multi_insert_mixed.q.out              |    9 +-
 ...i_insert_move_tasks_share_dependencies.q.out |   96 +-
 .../spark/multigroupby_singlemr.q.out           |   33 +-
 .../clientpositive/spark/orc_merge1.q.out       |    9 +-
 .../clientpositive/spark/orc_merge2.q.out       |    3 +-
 .../clientpositive/spark/orc_merge3.q.out       |    3 +-
 .../clientpositive/spark/orc_merge4.q.out       |    3 +-
 .../clientpositive/spark/orc_merge5.q.out       |    9 +-
 .../clientpositive/spark/orc_merge6.q.out       |    9 +-
 .../clientpositive/spark/orc_merge7.q.out       |    9 +-
 .../spark/orc_merge_diff_fs.q.out               |    9 +-
 .../spark/orc_merge_incompat1.q.out             |    3 +-
 .../spark/orc_merge_incompat2.q.out             |    6 +-
 .../results/clientpositive/spark/parallel.q.out |    6 +-
 .../clientpositive/spark/parallel_join1.q.out   |    3 +-
 .../clientpositive/spark/parallel_orderby.q.out |    3 +-
 .../test/results/clientpositive/spark/pcr.q.out |   12 +-
 .../clientpositive/spark/ppd_multi_insert.q.out |   18 +-
 .../test/results/clientpositive/spark/ptf.q.out |    6 +-
 .../spark/reduce_deduplicate.q.out              |    6 +-
 .../results/clientpositive/spark/sample1.q.out  |    3 +-
 .../results/clientpositive/spark/sample2.q.out  |    3 +-
 .../results/clientpositive/spark/sample4.q.out  |    3 +-
 .../results/clientpositive/spark/sample5.q.out  |    3 +-
 .../results/clientpositive/spark/sample6.q.out  |    3 +-
 .../results/clientpositive/spark/sample7.q.out  |    3 +-
 .../results/clientpositive/spark/skewjoin.q.out |    3 +-
 .../clientpositive/spark/skewjoin_noskew.q.out  |    3 +-
 .../clientpositive/spark/smb_mapjoin_1.q.out    |   43 +
 .../clientpositive/spark/smb_mapjoin_11.q.out   |    3 +-
 .../clientpositive/spark/smb_mapjoin_12.q.out   |    6 +-
 .../clientpositive/spark/smb_mapjoin_18.q.out   |    9 +-
 .../clientpositive/spark/smb_mapjoin_19.q.out   |    3 +-
 .../clientpositive/spark/smb_mapjoin_20.q.out   |    9 +-
 .../clientpositive/spark/smb_mapjoin_21.q.out   |   18 +-
 .../clientpositive/spark/smb_mapjoin_22.q.out   |    6 +-
 .../clientpositive/spark/smb_mapjoin_6.q.out    |   12 +-
 .../clientpositive/spark/smb_mapjoin_7.q.out    |    3 +-
 .../spark/spark_dynamic_partition_pruning.q.out |    3 +-
 .../spark/spark_explainuser_1.q.out             |   14 +-
 .../spark_multi_insert_parallel_orderby.q.out   |   18 +-
 .../spark/spark_use_ts_stats_for_mapjoin.q.out  |    3 +-
 ...k_vectorized_dynamic_partition_pruning.q.out |    3 +-
 .../results/clientpositive/spark/stats0.q.out   |   12 +-
 .../results/clientpositive/spark/stats1.q.out   |    3 +-
 .../results/clientpositive/spark/stats10.q.out  |    6 +-
 .../results/clientpositive/spark/stats12.q.out  |    5 +-
 .../results/clientpositive/spark/stats13.q.out  |    5 +-
 .../results/clientpositive/spark/stats2.q.out   |    3 +-
 .../results/clientpositive/spark/stats3.q.out   |    3 +-
 .../results/clientpositive/spark/stats5.q.out   |    3 +-
 .../results/clientpositive/spark/stats7.q.out   |    3 +-
 .../results/clientpositive/spark/stats8.q.out   |   15 +-
 .../results/clientpositive/spark/stats9.q.out   |    3 +-
 .../clientpositive/spark/stats_noscan_1.q.out   |    6 +-
 .../clientpositive/spark/stats_only_null.q.out  |   16 +
 .../spark/stats_partscan_1_23.q.out             |  188 +++
 .../spark/subquery_multiinsert.q.out            |   12 +-
 .../clientpositive/spark/temp_table.q.out       |    6 +-
 .../results/clientpositive/spark/union10.q.out  |    3 +-
 .../results/clientpositive/spark/union12.q.out  |    3 +-
 .../results/clientpositive/spark/union17.q.out  |    6 +-
 .../results/clientpositive/spark/union18.q.out  |    6 +-
 .../results/clientpositive/spark/union19.q.out  |    6 +-
 .../results/clientpositive/spark/union22.q.out  |    3 +-
 .../results/clientpositive/spark/union25.q.out  |    3 +-
 .../results/clientpositive/spark/union28.q.out  |    3 +-
 .../results/clientpositive/spark/union29.q.out  |    3 +-
 .../results/clientpositive/spark/union30.q.out  |    3 +-
 .../results/clientpositive/spark/union31.q.out  |   18 +-
 .../results/clientpositive/spark/union33.q.out  |    6 +-
 .../results/clientpositive/spark/union4.q.out   |    3 +-
 .../results/clientpositive/spark/union6.q.out   |    3 +-
 .../spark/union_lateralview.q.out               |    3 +-
 .../clientpositive/spark/union_top_level.q.out  |    9 +-
 .../clientpositive/spark/vector_char_4.q.out    |    3 +-
 .../spark/vector_outer_join1.q.out              |    2 +
 .../spark/vector_outer_join2.q.out              |    2 +
 .../spark/vector_outer_join3.q.out              |    2 +
 .../spark/vector_outer_join4.q.out              |    2 +
 .../spark/vector_outer_join5.q.out              |    8 +
 .../clientpositive/spark/vector_varchar_4.q.out |    3 +-
 .../clientpositive/spark/vectorized_ptf.q.out   |    6 +-
 .../special_character_in_tabnames_2.q.out       |    2 +
 ql/src/test/results/clientpositive/stats0.q.out |   12 +-
 ql/src/test/results/clientpositive/stats1.q.out |    3 +-
 .../test/results/clientpositive/stats10.q.out   |    6 +-
 .../test/results/clientpositive/stats12.q.out   |    5 +-
 .../test/results/clientpositive/stats13.q.out   |    5 +-
 ql/src/test/results/clientpositive/stats2.q.out |    3 +-
 ql/src/test/results/clientpositive/stats3.q.out |    3 +-
 ql/src/test/results/clientpositive/stats4.q.out |    6 +-
 ql/src/test/results/clientpositive/stats5.q.out |    3 +-
 ql/src/test/results/clientpositive/stats7.q.out |    3 +-
 ql/src/test/results/clientpositive/stats8.q.out |   15 +-
 ql/src/test/results/clientpositive/stats9.q.out |    3 +-
 .../clientpositive/stats_empty_dyn_part.q.out   |    3 +-
 .../clientpositive/stats_invalidation.q.out     |    2 +
 .../clientpositive/stats_missing_warning.q.out  |    6 +
 .../results/clientpositive/stats_noscan_1.q.out |    6 +-
 .../clientpositive/stats_only_null.q.out        |   16 +
 .../clientpositive/stats_partial_size.q.out     |    2 +
 .../clientpositive/stats_partscan_1_23.q.out    |  191 +++
 .../results/clientpositive/stats_ppr_all.q.out  |    8 +
 .../clientpositive/subquery_multiinsert.q.out   |   12 +-
 .../temp_table_display_colstats_tbllvl.q.out    |  164 +-
 .../tez/acid_vectorization_original_tez.q.out   |    3 +-
 .../clientpositive/tez/explainanalyze_1.q.out   |    4 +-
 .../clientpositive/tez/explainanalyze_2.q.out   |   24 +-
 .../clientpositive/tez/explainanalyze_3.q.out   |   34 +-
 .../clientpositive/tez/explainanalyze_5.q.out   |  162 +-
 .../clientpositive/tez/explainuser_3.q.out      |   38 +-
 .../results/clientpositive/tunable_ndv.q.out    |   22 +
 ql/src/test/results/clientpositive/udf1.q.out   |    3 +-
 ql/src/test/results/clientpositive/udf3.q.out   |    3 +-
 .../results/clientpositive/udf_10_trims.q.out   |    3 +-
 .../clientpositive/udf_character_length.q.out   |    3 +-
 .../results/clientpositive/udf_length.q.out     |    3 +-
 .../clientpositive/udf_octet_length.q.out       |    3 +-
 .../results/clientpositive/udf_reverse.q.out    |    3 +-
 .../test/results/clientpositive/union10.q.out   |    3 +-
 .../test/results/clientpositive/union12.q.out   |    3 +-
 .../test/results/clientpositive/union17.q.out   |    6 +-
 .../test/results/clientpositive/union18.q.out   |    6 +-
 .../test/results/clientpositive/union19.q.out   |    6 +-
 .../test/results/clientpositive/union22.q.out   |    3 +-
 .../test/results/clientpositive/union25.q.out   |    3 +-
 .../test/results/clientpositive/union28.q.out   |    3 +-
 .../test/results/clientpositive/union29.q.out   |    3 +-
 .../test/results/clientpositive/union30.q.out   |    3 +-
 .../test/results/clientpositive/union31.q.out   |   18 +-
 .../test/results/clientpositive/union33.q.out   |    6 +-
 ql/src/test/results/clientpositive/union4.q.out |    3 +-
 ql/src/test/results/clientpositive/union6.q.out |    3 +-
 .../clientpositive/union_lateralview.q.out      |    3 +-
 .../results/clientpositive/union_stats.q.out    |    3 +-
 .../clientpositive/updateAccessTime.q.out       |    2 +
 .../results/clientpositive/vector_bucket.q.out  |    3 +-
 .../results/clientpositive/vector_char_4.q.out  |    3 +-
 .../results/clientpositive/vector_const.q.out   |    2 +-
 .../clientpositive/vector_decimal_6.q.out       |    3 +-
 .../clientpositive/vector_groupby4.q.out        |    3 +-
 .../clientpositive/vector_groupby6.q.out        |    3 +-
 .../clientpositive/vector_if_expr_2.q.out       |   10 +-
 .../results/clientpositive/vector_like_2.q.out  |   10 +-
 .../clientpositive/vector_multi_insert.q.out    |    9 +-
 .../clientpositive/vector_outer_join1.q.out     |    2 +
 .../clientpositive/vector_outer_join2.q.out     |    2 +
 .../clientpositive/vector_outer_join3.q.out     |    2 +
 .../clientpositive/vector_outer_join4.q.out     |    2 +
 .../vector_outer_reference_windowed.q.out       |  176 +-
 .../vector_tablesample_rows.q.out               |    6 +-
 .../vector_udf_character_length.q.out           |    7 +-
 .../vector_udf_octet_length.q.out               |    5 +-
 .../clientpositive/vector_varchar_4.q.out       |    3 +-
 .../clientpositive/vector_varchar_simple.q.out  |    3 +-
 .../clientpositive/vector_windowing.q.out       |    9 +-
 .../vector_windowing_expressions.q.out          |    6 +-
 .../vector_windowing_streaming.q.out            |    3 +-
 .../clientpositive/vectorized_mapjoin2.q.out    |   14 +-
 .../hadoop/hive/common/StatsSetupConst.java     |   12 +-
 .../hadoop/hive/metastore/HiveAlterHandler.java |   18 +-
 .../hadoop/hive/metastore/ObjectStore.java      |    2 +-
 .../TransactionalValidationListener.java        |    5 +
 .../merge/ColumnStatsMergerFactory.java         |    2 +-
 885 files changed, 13156 insertions(+), 4184 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/accumulo-handler/src/test/results/positive/accumulo_single_sourced_multi_insert.q.out
----------------------------------------------------------------------
diff --git a/accumulo-handler/src/test/results/positive/accumulo_single_sourced_multi_insert.q.out b/accumulo-handler/src/test/results/positive/accumulo_single_sourced_multi_insert.q.out
index 59bca50..f14c3d6 100644
--- a/accumulo-handler/src/test/results/positive/accumulo_single_sourced_multi_insert.q.out
+++ b/accumulo-handler/src/test/results/positive/accumulo_single_sourced_multi_insert.q.out
@@ -119,7 +119,8 @@ STAGE PLANS:
               name: default.src_x1
 
   Stage: Stage-5
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-6
     Map Reduce

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/common/src/java/org/apache/hadoop/hive/common/jsonexplain/Vertex.java
----------------------------------------------------------------------
diff --git a/common/src/java/org/apache/hadoop/hive/common/jsonexplain/Vertex.java b/common/src/java/org/apache/hadoop/hive/common/jsonexplain/Vertex.java
index a73893f..5379754 100644
--- a/common/src/java/org/apache/hadoop/hive/common/jsonexplain/Vertex.java
+++ b/common/src/java/org/apache/hadoop/hive/common/jsonexplain/Vertex.java
@@ -249,7 +249,8 @@ public final class Vertex implements Comparable<Vertex>{
       // find the right op
       Op choose = null;
       for (Op op : this.outputOps) {
-        if (op.outputVertexName.equals(callingVertex.name)) {
+        // op.outputVertexName may be null
+        if (callingVertex.name.equals(op.outputVertexName)) {
           choose = op;
         }
       }
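
The reordered comparison above is the usual null-safe equals pattern: calling equals on the
operand that is known to be non-null avoids a NullPointerException when the other side may be
null. A minimal self-contained sketch with hypothetical values (not from the patch):

    String callingName = "Map 1";   // callingVertex.name is non-null at this point
    String outputName = null;       // op.outputVertexName may be null
    // outputName.equals(callingName) would throw a NullPointerException here;
    // reversing the receiver makes the comparison safely evaluate to false.
    boolean match = callingName.equals(outputName);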

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/contrib/src/test/results/clientpositive/serde_typedbytes.q.out
----------------------------------------------------------------------
diff --git a/contrib/src/test/results/clientpositive/serde_typedbytes.q.out b/contrib/src/test/results/clientpositive/serde_typedbytes.q.out
index 6876ca8..c844a70 100644
--- a/contrib/src/test/results/clientpositive/serde_typedbytes.q.out
+++ b/contrib/src/test/results/clientpositive/serde_typedbytes.q.out
@@ -93,7 +93,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-3
     Map Reduce

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/contrib/src/test/results/clientpositive/serde_typedbytes2.q.out
----------------------------------------------------------------------
diff --git a/contrib/src/test/results/clientpositive/serde_typedbytes2.q.out b/contrib/src/test/results/clientpositive/serde_typedbytes2.q.out
index 79cf8fe..c0228aa 100644
--- a/contrib/src/test/results/clientpositive/serde_typedbytes2.q.out
+++ b/contrib/src/test/results/clientpositive/serde_typedbytes2.q.out
@@ -89,7 +89,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-3
     Map Reduce

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/contrib/src/test/results/clientpositive/serde_typedbytes3.q.out
----------------------------------------------------------------------
diff --git a/contrib/src/test/results/clientpositive/serde_typedbytes3.q.out b/contrib/src/test/results/clientpositive/serde_typedbytes3.q.out
index fec58ef..0b7541a 100644
--- a/contrib/src/test/results/clientpositive/serde_typedbytes3.q.out
+++ b/contrib/src/test/results/clientpositive/serde_typedbytes3.q.out
@@ -89,7 +89,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-3
     Map Reduce

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/contrib/src/test/results/clientpositive/serde_typedbytes4.q.out
----------------------------------------------------------------------
diff --git a/contrib/src/test/results/clientpositive/serde_typedbytes4.q.out b/contrib/src/test/results/clientpositive/serde_typedbytes4.q.out
index 1131478..981ff21 100644
--- a/contrib/src/test/results/clientpositive/serde_typedbytes4.q.out
+++ b/contrib/src/test/results/clientpositive/serde_typedbytes4.q.out
@@ -89,7 +89,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM (
   FROM src

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/contrib/src/test/results/clientpositive/serde_typedbytes5.q.out
----------------------------------------------------------------------
diff --git a/contrib/src/test/results/clientpositive/serde_typedbytes5.q.out b/contrib/src/test/results/clientpositive/serde_typedbytes5.q.out
index 8d3b95e..5a7df3c 100644
--- a/contrib/src/test/results/clientpositive/serde_typedbytes5.q.out
+++ b/contrib/src/test/results/clientpositive/serde_typedbytes5.q.out
@@ -93,7 +93,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-3
     Map Reduce

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/data/scripts/q_test_init_src.sql
----------------------------------------------------------------------
diff --git a/data/scripts/q_test_init_src.sql b/data/scripts/q_test_init_src.sql
index 56b44e0..2a62d29 100644
--- a/data/scripts/q_test_init_src.sql
+++ b/data/scripts/q_test_init_src.sql
@@ -3,3 +3,8 @@ DROP TABLE IF EXISTS src PURGE;
 CREATE TABLE src(key STRING COMMENT 'default', value STRING COMMENT 'default') STORED AS TEXTFILE;
 
 LOAD DATA LOCAL INPATH "${hiveconf:test.data.dir}/kv1.txt" OVERWRITE INTO TABLE src;
+
+ANALYZE TABLE src COMPUTE STATISTICS;
+
+ANALYZE TABLE src COMPUTE STATISTICS FOR COLUMNS key,value;
+

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/hbase-handler/src/test/results/positive/hbase_single_sourced_multi_insert.q.out
----------------------------------------------------------------------
diff --git a/hbase-handler/src/test/results/positive/hbase_single_sourced_multi_insert.q.out b/hbase-handler/src/test/results/positive/hbase_single_sourced_multi_insert.q.out
index e8927e9..efc4a11 100644
--- a/hbase-handler/src/test/results/positive/hbase_single_sourced_multi_insert.q.out
+++ b/hbase-handler/src/test/results/positive/hbase_single_sourced_multi_insert.q.out
@@ -119,7 +119,8 @@ STAGE PLANS:
               name: default.src_x1
 
   Stage: Stage-5
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-6
     Map Reduce

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/itests/hive-blobstore/src/test/results/clientpositive/explain.q.out
----------------------------------------------------------------------
diff --git a/itests/hive-blobstore/src/test/results/clientpositive/explain.q.out b/itests/hive-blobstore/src/test/results/clientpositive/explain.q.out
index 09197f9..cae2a13 100644
--- a/itests/hive-blobstore/src/test/results/clientpositive/explain.q.out
+++ b/itests/hive-blobstore/src/test/results/clientpositive/explain.q.out
@@ -88,7 +88,8 @@ STAGE PLANS:
               name: default.blobstore_table
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: SELECT * FROM blobstore_table
 PREHOOK: type: QUERY

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/itests/hive-blobstore/src/test/results/clientpositive/insert_into_dynamic_partitions.q.out
----------------------------------------------------------------------
diff --git a/itests/hive-blobstore/src/test/results/clientpositive/insert_into_dynamic_partitions.q.out b/itests/hive-blobstore/src/test/results/clientpositive/insert_into_dynamic_partitions.q.out
index e55b1c2..daf95c3 100644
--- a/itests/hive-blobstore/src/test/results/clientpositive/insert_into_dynamic_partitions.q.out
+++ b/itests/hive-blobstore/src/test/results/clientpositive/insert_into_dynamic_partitions.q.out
@@ -208,8 +208,9 @@ STAGE PLANS:
               name: default.table1
 
   Stage: Stage-2
-    Stats-Aggr Operator
-      Stats Aggregation Key Prefix: ### BLOBSTORE_STAGING_PATH ###
+    Stats Work
+      Basic Stats Work:
+          Stats Aggregation Key Prefix: ### BLOBSTORE_STAGING_PATH ###
 
 PREHOOK: query: DROP TABLE table1
 PREHOOK: type: DROPTABLE

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/itests/hive-blobstore/src/test/results/clientpositive/insert_into_table.q.out
----------------------------------------------------------------------
diff --git a/itests/hive-blobstore/src/test/results/clientpositive/insert_into_table.q.out b/itests/hive-blobstore/src/test/results/clientpositive/insert_into_table.q.out
index f50f4af..5349210 100644
--- a/itests/hive-blobstore/src/test/results/clientpositive/insert_into_table.q.out
+++ b/itests/hive-blobstore/src/test/results/clientpositive/insert_into_table.q.out
@@ -168,8 +168,9 @@ STAGE PLANS:
               name: default.table1
 
   Stage: Stage-2
-    Stats-Aggr Operator
-      Stats Aggregation Key Prefix: ### BLOBSTORE_STAGING_PATH ###
+    Stats Work
+      Basic Stats Work:
+          Stats Aggregation Key Prefix: ### BLOBSTORE_STAGING_PATH ###
 
   Stage: Stage-3
     Map Reduce

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/itests/hive-blobstore/src/test/results/clientpositive/insert_overwrite_dynamic_partitions.q.out
----------------------------------------------------------------------
diff --git a/itests/hive-blobstore/src/test/results/clientpositive/insert_overwrite_dynamic_partitions.q.out b/itests/hive-blobstore/src/test/results/clientpositive/insert_overwrite_dynamic_partitions.q.out
index 660cebb..42b9821 100644
--- a/itests/hive-blobstore/src/test/results/clientpositive/insert_overwrite_dynamic_partitions.q.out
+++ b/itests/hive-blobstore/src/test/results/clientpositive/insert_overwrite_dynamic_partitions.q.out
@@ -226,8 +226,9 @@ STAGE PLANS:
               name: default.table1
 
   Stage: Stage-2
-    Stats-Aggr Operator
-      Stats Aggregation Key Prefix: ### BLOBSTORE_STAGING_PATH ###
+    Stats Work
+      Basic Stats Work:
+          Stats Aggregation Key Prefix: ### BLOBSTORE_STAGING_PATH ###
 
 PREHOOK: query: DROP TABLE table1
 PREHOOK: type: DROPTABLE

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/itests/hive-blobstore/src/test/results/clientpositive/insert_overwrite_table.q.out
----------------------------------------------------------------------
diff --git a/itests/hive-blobstore/src/test/results/clientpositive/insert_overwrite_table.q.out b/itests/hive-blobstore/src/test/results/clientpositive/insert_overwrite_table.q.out
index ba0e83d..cae1a5b 100644
--- a/itests/hive-blobstore/src/test/results/clientpositive/insert_overwrite_table.q.out
+++ b/itests/hive-blobstore/src/test/results/clientpositive/insert_overwrite_table.q.out
@@ -176,8 +176,9 @@ STAGE PLANS:
               name: default.table1
 
   Stage: Stage-2
-    Stats-Aggr Operator
-      Stats Aggregation Key Prefix: ### BLOBSTORE_STAGING_PATH ###
+    Stats Work
+      Basic Stats Work:
+          Stats Aggregation Key Prefix: ### BLOBSTORE_STAGING_PATH ###
 
   Stage: Stage-3
     Map Reduce

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/itests/hive-blobstore/src/test/results/clientpositive/write_final_output_blobstore.q.out
----------------------------------------------------------------------
diff --git a/itests/hive-blobstore/src/test/results/clientpositive/write_final_output_blobstore.q.out b/itests/hive-blobstore/src/test/results/clientpositive/write_final_output_blobstore.q.out
index 2ababb1..45e6d25 100644
--- a/itests/hive-blobstore/src/test/results/clientpositive/write_final_output_blobstore.q.out
+++ b/itests/hive-blobstore/src/test/results/clientpositive/write_final_output_blobstore.q.out
@@ -245,7 +245,8 @@ STAGE PLANS:
               name: default.blobstore_table
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
 PREHOOK: query: EXPLAIN EXTENDED FROM hdfs_table INSERT OVERWRITE TABLE blobstore_table SELECT hdfs_table.key GROUP BY hdfs_table.key ORDER BY hdfs_table.key
@@ -469,8 +470,9 @@ STAGE PLANS:
               name: default.blobstore_table
 
   Stage: Stage-3
-    Stats-Aggr Operator
-      Stats Aggregation Key Prefix: ### BLOBSTORE_STAGING_PATH ###
+    Stats Work
+      Basic Stats Work:
+          Stats Aggregation Key Prefix: ### BLOBSTORE_STAGING_PATH ###
 
 PREHOOK: query: DROP TABLE hdfs_table
 PREHOOK: type: DROPTABLE

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestMTQueries.java
----------------------------------------------------------------------
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestMTQueries.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestMTQueries.java
index ad2baa2..e8ef4b9 100644
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestMTQueries.java
+++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestMTQueries.java
@@ -44,6 +44,7 @@ public class TestMTQueries extends BaseTestQueries {
       util.getConf().setBoolean("hive.exec.submit.local.task.via.child", true);
       util.getConf().set("hive.stats.dbclass", "fs");
       util.getConf().set("hive.mapred.mode", "nonstrict");
+      util.getConf().set("hive.stats.column.autogather", "false");
     }
     boolean success = QTestUtil.queryListRunnerMultiThreaded(qfiles, qts);
     if (!success) {
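
This hunk and the TestJdbcWithMiniHS2 hunk below both disable automatic column stats
gathering (hive.stats.column.autogather) in the test harnesses. A minimal sketch of the two
equivalent ways to set the flag, assuming a HiveConf instance is at hand (illustrative only):

    HiveConf conf = new HiveConf();
    // string-keyed form, as used with the QTestUtil conf above
    conf.set("hive.stats.column.autogather", "false");
    // typed form via ConfVars, as used in TestJdbcWithMiniHS2 below
    conf.setBoolVar(HiveConf.ConfVars.HIVESTATSCOLAUTOGATHER, false);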

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcWithMiniHS2.java
----------------------------------------------------------------------
diff --git a/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcWithMiniHS2.java b/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcWithMiniHS2.java
index 2edf749..f5ed735 100644
--- a/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcWithMiniHS2.java
+++ b/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcWithMiniHS2.java
@@ -203,6 +203,7 @@ public class TestJdbcWithMiniHS2 {
   private static void startMiniHS2(HiveConf conf, boolean httpMode) throws Exception {
     conf.setBoolVar(ConfVars.HIVE_SUPPORT_CONCURRENCY, false);
     conf.setBoolVar(ConfVars.HIVE_SERVER2_LOGGING_OPERATION_ENABLED, false);
+    conf.setBoolVar(ConfVars.HIVESTATSCOLAUTOGATHER, false);
     MiniHS2.Builder builder = new MiniHS2.Builder().withConf(conf).cleanupLocalDirOnStartup(false);
     if (httpMode) {
       builder = builder.withHTTPTransport();

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/itests/src/test/resources/testconfiguration.properties
----------------------------------------------------------------------
diff --git a/itests/src/test/resources/testconfiguration.properties b/itests/src/test/resources/testconfiguration.properties
index 46abf8a..5f92321 100644
--- a/itests/src/test/resources/testconfiguration.properties
+++ b/itests/src/test/resources/testconfiguration.properties
@@ -116,6 +116,9 @@ minillaplocal.shared.query.files=alter_merge_2_orc.q,\
   auto_sortmerge_join_7.q,\
   auto_sortmerge_join_8.q,\
   auto_sortmerge_join_9.q,\
+  autoColumnStats_1.q,\
+  autoColumnStats_10.q,\
+  autoColumnStats_2.q,\
   bucket2.q,\
   bucket3.q,\
   bucket4.q,\
@@ -486,8 +489,6 @@ minillaplocal.query.files=\
   auto_sortmerge_join_6.q,\
   auto_sortmerge_join_8.q,\
   auto_sortmerge_join_9.q,\
-  autoColumnStats_1.q,\
-  autoColumnStats_2.q,\
   bucket4.q,\
   bucket_groupby.q,\
   bucket_many.q,\

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
index f29a20d..ed58b41 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
@@ -33,6 +33,7 @@ import java.util.HashMap;
 import java.util.HashSet;
 import java.util.Iterator;
 import java.util.LinkedHashMap;
+import java.util.LinkedHashSet;
 import java.util.LinkedList;
 import java.util.List;
 import java.util.Map;
@@ -61,6 +62,7 @@ import com.google.common.collect.ImmutableList;
 import com.google.common.collect.ImmutableListMultimap;
 import com.google.common.collect.Lists;
 import com.google.common.collect.Multimaps;
+
 import org.apache.commons.cli.OptionBuilder;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileStatus;
@@ -6923,39 +6925,80 @@ public class HiveMetaStore extends ThriftHiveMetastore {
           if (request.isSetNeedMerge() && request.isNeedMerge()) {
             // one single call to get all column stats
             ColumnStatistics csOld = getMS().getTableColumnStatistics(dbName, tableName, colNames);
-            if (csOld != null && csOld.getStatsObjSize() != 0) {
+            Table t = getTable(dbName, tableName);
+            // we first use t.getParameters() to prune the stats
+            MetaStoreUtils.getMergableCols(firstColStats, t.getParameters());
+            // we merge those that can be merged
+            if (csOld != null && csOld.getStatsObjSize() != 0
+                && !firstColStats.getStatsObj().isEmpty()) {
               MetaStoreUtils.mergeColStats(firstColStats, csOld);
             }
+            if (!firstColStats.getStatsObj().isEmpty()) {
+              return update_table_column_statistics(firstColStats);
+            } else {
+              LOG.debug("All the column stats are not accurate to merge.");
+              return true;
+            }
+          } else {
+            // This is the overwrite case; we do not care about accuracy.
+            return update_table_column_statistics(firstColStats);
           }
-          return update_table_column_statistics(firstColStats);
         }
       } else {
         // partition level column stats merging
-        List<String> partitionNames = new ArrayList<>();
+        List<Partition> partitions = new ArrayList<>();
+        // note that we may have two or more duplicate partition names.
+        // see autoColumnStats_2.q under TestMiniLlapLocalCliDriver
+        Map<String, ColumnStatistics> newStatsMap = new HashMap<>();
         for (ColumnStatistics csNew : csNews) {
-          partitionNames.add(csNew.getStatsDesc().getPartName());
+          String partName = csNew.getStatsDesc().getPartName();
+          if (newStatsMap.containsKey(partName)) {
+            MetaStoreUtils.mergeColStats(csNew, newStatsMap.get(partName));
+          }
+          newStatsMap.put(partName, csNew);
         }
-        Map<String, ColumnStatistics> map = new HashMap<>();
+
+        Map<String, ColumnStatistics> oldStatsMap = new HashMap<>();
+        Map<String, Partition> mapToPart = new HashMap<>();
         if (request.isSetNeedMerge() && request.isNeedMerge()) {
           // a single call to get all column stats for all partitions
+          List<String> partitionNames = new ArrayList<>();
+          partitionNames.addAll(newStatsMap.keySet());
           List<ColumnStatistics> csOlds = getMS().getPartitionColumnStatistics(dbName, tableName,
               partitionNames, colNames);
-          if (csNews.size() != csOlds.size()) {
+          if (newStatsMap.values().size() != csOlds.size()) {
             // some of the partitions miss stats.
             LOG.debug("Some of the partitions miss stats.");
           }
           for (ColumnStatistics csOld : csOlds) {
-            map.put(csOld.getStatsDesc().getPartName(), csOld);
+            oldStatsMap.put(csOld.getStatsDesc().getPartName(), csOld);
+          }
+          // another single call to get all the partition objects
+          partitions = getMS().getPartitionsByNames(dbName, tableName, partitionNames);
+          for (int index = 0; index < partitionNames.size(); index++) {
+            mapToPart.put(partitionNames.get(index), partitions.get(index));
           }
         }
         Table t = getTable(dbName, tableName);
-        for (int index = 0; index < csNews.size(); index++) {
-          ColumnStatistics csNew = csNews.get(index);
-          ColumnStatistics csOld = map.get(csNew.getStatsDesc().getPartName());
-          if (csOld != null && csOld.getStatsObjSize() != 0) {
-            MetaStoreUtils.mergeColStats(csNew, csOld);
+        for (Entry<String, ColumnStatistics> entry : newStatsMap.entrySet()) {
+          ColumnStatistics csNew = entry.getValue();
+          ColumnStatistics csOld = oldStatsMap.get(entry.getKey());
+          if (request.isSetNeedMerge() && request.isNeedMerge()) {
+            // we first use getParameters() to prune the stats
+            MetaStoreUtils.getMergableCols(csNew, mapToPart.get(entry.getKey()).getParameters());
+            // we merge those that can be merged
+            if (csOld != null && csOld.getStatsObjSize() != 0 && !csNew.getStatsObj().isEmpty()) {
+              MetaStoreUtils.mergeColStats(csNew, csOld);
+            }
+            if (!csNew.getStatsObj().isEmpty()) {
+              ret = ret && updatePartitonColStats(t, csNew);
+            } else {
+              LOG.debug("All the column stats " + csNew.getStatsDesc().getPartName()
+                  + " are not accurate to merge.");
+            }
+          } else {
+            ret = ret && updatePartitonColStats(t, csNew);
           }
-          ret = ret && updatePartitonColStats(t, csNew);
         }
       }
       return ret;
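
Condensed, the merge path above for partition-level stats does three things: collapse
duplicate partition names by merging their incoming stats into one entry, prune each request
down to the columns whose stored stats are still accurate, and only then merge against any
existing stats. A minimal sketch of the per-partition merge branch, reusing the calls from
this hunk (the non-merge overwrite branch and error handling are elided):

    for (Entry<String, ColumnStatistics> entry : newStatsMap.entrySet()) {
      ColumnStatistics csNew = entry.getValue();
      ColumnStatistics csOld = oldStatsMap.get(entry.getKey());
      // drop columns that canColumnStatsMerge rules out for this partition
      MetaStoreUtils.getMergableCols(csNew, mapToPart.get(entry.getKey()).getParameters());
      if (csOld != null && csOld.getStatsObjSize() != 0 && !csNew.getStatsObj().isEmpty()) {
        MetaStoreUtils.mergeColStats(csNew, csOld);    // folds csOld into csNew in place
      }
      if (!csNew.getStatsObj().isEmpty()) {
        ret = ret && updatePartitonColStats(t, csNew); // persist whatever survived pruning
      }
    }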

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java b/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java
index a491789..24590b9 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java
@@ -38,11 +38,10 @@ import java.util.Set;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
 
+import com.google.common.base.Predicates;
+import com.google.common.collect.Maps;
 import org.apache.commons.collections.CollectionUtils;
 import org.apache.commons.lang.StringUtils;
-import org.apache.hadoop.hive.metastore.api.Decimal;
-import org.apache.hadoop.hive.metastore.api.Order;
-import org.apache.hadoop.hive.metastore.api.SkewedInfo;
 import org.apache.hadoop.hive.metastore.security.HadoopThriftAuthBridge;
 import org.apache.hadoop.hive.shims.ShimLoader;
 import org.slf4j.Logger;
@@ -1596,6 +1595,19 @@ public class MetaStoreUtils {
     return new URLClassLoader(curPath.toArray(new URL[0]), loader);
   }
 
+  protected static void getMergableCols(ColumnStatistics csNew, Map<String, String> parameters) {
+    List<ColumnStatisticsObj> list = new ArrayList<>();
+    for (int index = 0; index < csNew.getStatsObj().size(); index++) {
+      ColumnStatisticsObj statsObjNew = csNew.getStatsObj().get(index);
+      // canColumnStatsMerge guarantees that it is accurate before we do merge
+      if (StatsSetupConst.canColumnStatsMerge(parameters, statsObjNew.getColName())) {
+        list.add(statsObjNew);
+      }
+      // in all other cases, we cannot merge
+    }
+    csNew.setStatsObj(list);
+  }
+
   // this function will merge csOld into csNew.
   public static void mergeColStats(ColumnStatistics csNew, ColumnStatistics csOld)
       throws InvalidObjectException {
@@ -1619,13 +1631,20 @@ public class MetaStoreUtils {
       ColumnStatisticsObj statsObjNew = csNew.getStatsObj().get(index);
       ColumnStatisticsObj statsObjOld = map.get(statsObjNew.getColName());
       if (statsObjOld != null) {
+        // Because we have already confirmed that the stats are accurate,
+        // the column types cannot have changed while the column stats
+        // remain accurate.
+        assert (statsObjNew.getStatsData().getSetField() == statsObjOld.getStatsData()
+            .getSetField());
         // If statsObjOld is found, we can merge.
         ColumnStatsMerger merger = ColumnStatsMergerFactory.getColumnStatsMerger(statsObjNew,
             statsObjOld);
         merger.merge(statsObjNew, statsObjOld);
       }
+      // If statsObjOld is not found, we just use statsObjNew as it is accurate.
       list.add(statsObjNew);
     }
+    // in all other cases, we cannot merge
     csNew.setStatsObj(list);
   }
 

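The pruning above hinges on the COLUMN_STATS_ACCURATE table/partition parameter, whose JSON shape appears throughout the golden-file diffs below, e.g. {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true"}}. A rough sketch of the per-column check follows; the naive substring test is a stand-in for the real JSON handling in StatsSetupConst.canColumnStatsMerge, and the parameter value is copied from the test output, not from the patch itself.

import java.util.*;

public class AccuracyCheckSketch {
  // Naive stand-in for StatsSetupConst.canColumnStatsMerge: a column's stats are
  // mergeable only when COLUMN_STATS_ACCURATE marks that column "true".
  static boolean canMerge(Map<String, String> parameters, String colName) {
    String accurate = parameters.get("COLUMN_STATS_ACCURATE");
    return accurate != null && accurate.contains("\"" + colName + "\":\"true\"");
  }

  public static void main(String[] args) {
    Map<String, String> params = new HashMap<>();
    // Shape taken from the golden-file diffs in this commit.
    params.put("COLUMN_STATS_ACCURATE",
        "{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\"}}");
    System.out.println(canMerge(params, "key"));   // true  -> kept by getMergableCols
    System.out.println(canMerge(params, "value")); // false -> pruned before merging
  }
}
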
http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/pom.xml
----------------------------------------------------------------------
diff --git a/pom.xml b/pom.xml
index dc31bd5..9cd32d2 100644
--- a/pom.xml
+++ b/pom.xml
@@ -1000,6 +1000,7 @@
           <configuration>
             <downloadJavadocs>true</downloadJavadocs>
             <downloadSources>true</downloadSources>
+            <buildOutputDirectory>target/eclipse/classes</buildOutputDirectory>
             <workspaceActiveCodeStyleProfileName>Hive</workspaceActiveCodeStyleProfileName>
             <workspaceCodeStylesURL>${basedir}/dev-support/eclipse-styles.xml</workspaceCodeStylesURL>
           </configuration>

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/java/org/apache/hadoop/hive/ql/DriverContext.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/DriverContext.java b/ql/src/java/org/apache/hadoop/hive/ql/DriverContext.java
index 583d3d3..1789cc3 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/DriverContext.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/DriverContext.java
@@ -18,11 +18,11 @@
 
 package org.apache.hadoop.hive.ql;
 
+import org.apache.hadoop.hive.ql.exec.StatsTask;
 import org.apache.hadoop.hive.ql.exec.FileSinkOperator;
 import org.apache.hadoop.hive.ql.exec.NodeUtils;
 import org.apache.hadoop.hive.ql.exec.NodeUtils.Function;
 import org.apache.hadoop.hive.ql.exec.Operator;
-import org.apache.hadoop.hive.ql.exec.StatsTask;
 import org.apache.hadoop.hive.ql.exec.Task;
 import org.apache.hadoop.hive.ql.exec.TaskRunner;
 import org.apache.hadoop.hive.ql.exec.mr.MapRedTask;
@@ -64,7 +64,7 @@ public class DriverContext {
   private Context ctx;
   private boolean shutdown;
 
-  final Map<String, StatsTask> statsTasks = new HashMap<String, StatsTask>(1);
+  final Map<String, StatsTask> statsTasks = new HashMap<>(1);
 
   public DriverContext() {
   }
@@ -191,7 +191,9 @@ public class DriverContext {
     NodeUtils.iterateTask(rootTasks, StatsTask.class, new Function<StatsTask>() {
       @Override
       public void apply(StatsTask statsTask) {
-        statsTasks.put(statsTask.getWork().getAggKey(), statsTask);
+        if (statsTask.getWork().isAggregating()) {
+          statsTasks.put(statsTask.getWork().getAggKey(), statsTask);
+        }
       }
     });
   }

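The DriverContext change is small but load-bearing: after the task merge, a StatsTask may only gather column stats, and only tasks that also aggregate basic stats carry a meaningful aggregation key to index. A toy sketch of the guarded registration; the Work class and the key format are hypothetical stand-ins, not the Hive StatsWork API.

import java.util.*;

public class StatsTaskIndexSketch {
  // Minimal stand-in for StatsTask/StatsWork (hypothetical, for illustration).
  static class Work {
    final String aggKey;
    Work(String k) { aggKey = k; }
    boolean isAggregating() { return aggKey != null; }
  }

  public static void main(String[] args) {
    Map<String, Work> statsTasks = new HashMap<>(1);
    for (Work w : List.of(new Work("db.tbl/ds=1/"), new Work(null))) {
      // Only aggregating tasks have an aggregation key worth indexing;
      // column-stats-only tasks are skipped instead of polluting the map.
      if (w.isAggregating()) {
        statsTasks.put(w.aggKey, w);
      }
    }
    System.out.println(statsTasks.keySet()); // [db.tbl/ds=1/]
  }
}
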

[18/22] hive git commit: HIVE-16827 : Merge stats task and column stats task into a single task (Zoltan Haindrich via Ashutosh Chauhan)

Posted by ha...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/queries/clientpositive/temp_table_display_colstats_tbllvl.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/temp_table_display_colstats_tbllvl.q b/ql/src/test/queries/clientpositive/temp_table_display_colstats_tbllvl.q
index 39a11f2..e121583 100644
--- a/ql/src/test/queries/clientpositive/temp_table_display_colstats_tbllvl.q
+++ b/ql/src/test/queries/clientpositive/temp_table_display_colstats_tbllvl.q
@@ -15,8 +15,11 @@ CREATE TEMPORARY EXTERNAL TABLE UserVisits_web_text_none (
 row format delimited fields terminated by '|'  stored as textfile
 location 'pfile://${system:test.tmp.dir}/uservisits_web_text_none';
 
+desc formatted UserVisits_web_text_none;
+
 LOAD DATA LOCAL INPATH "../../data/files/UserVisits.dat" INTO TABLE UserVisits_web_text_none;
 
+desc formatted UserVisits_web_text_none;
 desc extended UserVisits_web_text_none sourceIP;
 desc formatted UserVisits_web_text_none sourceIP;
 
@@ -27,6 +30,7 @@ explain extended
 analyze table UserVisits_web_text_none compute statistics for columns sourceIP, avgTimeOnSite, adRevenue;
 
 analyze table UserVisits_web_text_none compute statistics for columns sourceIP, avgTimeOnSite, adRevenue;
+desc formatted UserVisits_web_text_none;
 desc formatted UserVisits_web_text_none sourceIP;
 desc formatted UserVisits_web_text_none avgTimeOnSite;
 desc formatted UserVisits_web_text_none adRevenue;

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/acid_table_stats.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/acid_table_stats.q.out b/ql/src/test/results/clientpositive/acid_table_stats.q.out
index 32c8531..351ff0d 100644
--- a/ql/src/test/results/clientpositive/acid_table_stats.q.out
+++ b/ql/src/test/results/clientpositive/acid_table_stats.q.out
@@ -91,6 +91,7 @@ Database:           	default
 Table:              	acid                	 
 #### A masked pattern was here ####
 Partition Parameters:	 	 
+	COLUMN_STATS_ACCURATE	{}                  
 	numFiles            	2                   
 	numRows             	0                   
 	rawDataSize         	0                   
@@ -226,11 +227,15 @@ PREHOOK: query: analyze table acid partition(ds='2008-04-08') compute statistics
 PREHOOK: type: QUERY
 PREHOOK: Input: default@acid
 PREHOOK: Input: default@acid@ds=2008-04-08
+PREHOOK: Output: default@acid
+PREHOOK: Output: default@acid@ds=2008-04-08
 #### A masked pattern was here ####
 POSTHOOK: query: analyze table acid partition(ds='2008-04-08') compute statistics for columns
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@acid
 POSTHOOK: Input: default@acid@ds=2008-04-08
+POSTHOOK: Output: default@acid
+POSTHOOK: Output: default@acid@ds=2008-04-08
 #### A masked pattern was here ####
 PREHOOK: query: desc formatted acid partition(ds='2008-04-08')
 PREHOOK: type: DESCTABLE
@@ -378,6 +383,7 @@ Database:           	default
 Table:              	acid                	 
 #### A masked pattern was here ####
 Partition Parameters:	 	 
+	COLUMN_STATS_ACCURATE	{}                  
 	numFiles            	4                   
 	numRows             	1000                
 	rawDataSize         	208000              
@@ -502,11 +508,15 @@ PREHOOK: query: analyze table acid partition(ds='2008-04-08') compute statistics
 PREHOOK: type: QUERY
 PREHOOK: Input: default@acid
 PREHOOK: Input: default@acid@ds=2008-04-08
+PREHOOK: Output: default@acid
+PREHOOK: Output: default@acid@ds=2008-04-08
 #### A masked pattern was here ####
 POSTHOOK: query: analyze table acid partition(ds='2008-04-08') compute statistics for columns
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@acid
 POSTHOOK: Input: default@acid@ds=2008-04-08
+POSTHOOK: Output: default@acid
+POSTHOOK: Output: default@acid@ds=2008-04-08
 #### A masked pattern was here ####
 PREHOOK: query: explain select max(key) from acid where ds='2008-04-08'
 PREHOOK: type: QUERY

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/alterColumnStatsPart.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/alterColumnStatsPart.q.out b/ql/src/test/results/clientpositive/alterColumnStatsPart.q.out
index 6b9b7e5..858e16f 100644
--- a/ql/src/test/results/clientpositive/alterColumnStatsPart.q.out
+++ b/ql/src/test/results/clientpositive/alterColumnStatsPart.q.out
@@ -154,11 +154,15 @@ PREHOOK: query: analyze table p partition(c=1) compute statistics for columns a
 PREHOOK: type: QUERY
 PREHOOK: Input: default@p
 PREHOOK: Input: default@p@c=1
+PREHOOK: Output: default@p
+PREHOOK: Output: default@p@c=1
 #### A masked pattern was here ####
 POSTHOOK: query: analyze table p partition(c=1) compute statistics for columns a
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@p
 POSTHOOK: Input: default@p@c=1
+POSTHOOK: Output: default@p
+POSTHOOK: Output: default@p@c=1
 #### A masked pattern was here ####
 PREHOOK: query: explain select max(a) from p where c=1
 PREHOOK: type: QUERY
@@ -371,11 +375,15 @@ PREHOOK: query: analyze table p partition(c=100) compute statistics for columns
 PREHOOK: type: QUERY
 PREHOOK: Input: default@p
 PREHOOK: Input: default@p@c=100
+PREHOOK: Output: default@p
+PREHOOK: Output: default@p@c=100
 #### A masked pattern was here ####
 POSTHOOK: query: analyze table p partition(c=100) compute statistics for columns a
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@p
 POSTHOOK: Input: default@p@c=100
+POSTHOOK: Output: default@p
+POSTHOOK: Output: default@p@c=100
 #### A masked pattern was here ####
 PREHOOK: query: explain select max(a) from p where c=100
 PREHOOK: type: QUERY
@@ -474,11 +482,15 @@ PREHOOK: query: analyze table p partition(c=100) compute statistics for columns
 PREHOOK: type: QUERY
 PREHOOK: Input: default@p
 PREHOOK: Input: default@p@c=100
+PREHOOK: Output: default@p
+PREHOOK: Output: default@p@c=100
 #### A masked pattern was here ####
 POSTHOOK: query: analyze table p partition(c=100) compute statistics for columns a
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@p
 POSTHOOK: Input: default@p@c=100
+POSTHOOK: Output: default@p
+POSTHOOK: Output: default@p@c=100
 #### A masked pattern was here ####
 PREHOOK: query: explain select max(a) from p where c=100
 PREHOOK: type: QUERY

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/alter_partition_update_status.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/alter_partition_update_status.q.out b/ql/src/test/results/clientpositive/alter_partition_update_status.q.out
index 37b24ed..9aab9b2 100644
--- a/ql/src/test/results/clientpositive/alter_partition_update_status.q.out
+++ b/ql/src/test/results/clientpositive/alter_partition_update_status.q.out
@@ -22,11 +22,15 @@ PREHOOK: query: ANALYZE TABLE src_stat_part_one PARTITION(partitionId=1) COMPUTE
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src_stat_part_one
 PREHOOK: Input: default@src_stat_part_one@partitionid=1
+PREHOOK: Output: default@src_stat_part_one
+PREHOOK: Output: default@src_stat_part_one@partitionid=1
 #### A masked pattern was here ####
 POSTHOOK: query: ANALYZE TABLE src_stat_part_one PARTITION(partitionId=1) COMPUTE STATISTICS for columns
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src_stat_part_one
 POSTHOOK: Input: default@src_stat_part_one@partitionid=1
+POSTHOOK: Output: default@src_stat_part_one
+POSTHOOK: Output: default@src_stat_part_one@partitionid=1
 #### A masked pattern was here ####
 PREHOOK: query: describe formatted src_stat_part_one PARTITION(partitionId=1) key
 PREHOOK: type: DESCTABLE
@@ -92,11 +96,15 @@ PREHOOK: query: ANALYZE TABLE src_stat_part_two PARTITION(px=1) COMPUTE STATISTI
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src_stat_part_two
 PREHOOK: Input: default@src_stat_part_two@px=1/py=a
+PREHOOK: Output: default@src_stat_part_two
+PREHOOK: Output: default@src_stat_part_two@px=1/py=a
 #### A masked pattern was here ####
 POSTHOOK: query: ANALYZE TABLE src_stat_part_two PARTITION(px=1) COMPUTE STATISTICS for columns
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src_stat_part_two
 POSTHOOK: Input: default@src_stat_part_two@px=1/py=a
+POSTHOOK: Output: default@src_stat_part_two
+POSTHOOK: Output: default@src_stat_part_two@px=1/py=a
 #### A masked pattern was here ####
 PREHOOK: query: describe formatted src_stat_part_two PARTITION(px=1, py='a') key
 PREHOOK: type: DESCTABLE

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/alter_table_column_stats.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/alter_table_column_stats.q.out b/ql/src/test/results/clientpositive/alter_table_column_stats.q.out
index a648335..a8ef59c 100644
--- a/ql/src/test/results/clientpositive/alter_table_column_stats.q.out
+++ b/ql/src/test/results/clientpositive/alter_table_column_stats.q.out
@@ -77,10 +77,12 @@ PREHOOK: query: analyze table testtable0 compute statistics for columns
 PREHOOK: type: QUERY
 PREHOOK: Input: statsdb1@testtable0
 #### A masked pattern was here ####
+PREHOOK: Output: statsdb1@testtable0
 POSTHOOK: query: analyze table testtable0 compute statistics for columns
 POSTHOOK: type: QUERY
 POSTHOOK: Input: statsdb1@testtable0
 #### A masked pattern was here ####
+POSTHOOK: Output: statsdb1@testtable0
 PREHOOK: query: describe formatted statsdb1.testtable0
 PREHOOK: type: DESCTABLE
 PREHOOK: Input: statsdb1@testtable0
@@ -585,12 +587,18 @@ PREHOOK: Input: statsdb1@testpart0
 PREHOOK: Input: statsdb1@testpart0@part=part1
 PREHOOK: Input: statsdb1@testpart0@part=part2
 #### A masked pattern was here ####
+PREHOOK: Output: statsdb1@testpart0
+PREHOOK: Output: statsdb1@testpart0@part=part1
+PREHOOK: Output: statsdb1@testpart0@part=part2
 POSTHOOK: query: analyze table testpart0 compute statistics for columns
 POSTHOOK: type: QUERY
 POSTHOOK: Input: statsdb1@testpart0
 POSTHOOK: Input: statsdb1@testpart0@part=part1
 POSTHOOK: Input: statsdb1@testpart0@part=part2
 #### A masked pattern was here ####
+POSTHOOK: Output: statsdb1@testpart0
+POSTHOOK: Output: statsdb1@testpart0@part=part1
+POSTHOOK: Output: statsdb1@testpart0@part=part2
 PREHOOK: query: describe formatted statsdb1.testpart0
 PREHOOK: type: DESCTABLE
 PREHOOK: Input: statsdb1@testpart0
@@ -1939,10 +1947,12 @@ PREHOOK: query: analyze table testtable0 compute statistics for columns
 PREHOOK: type: QUERY
 PREHOOK: Input: statsdb1@testtable0
 #### A masked pattern was here ####
+PREHOOK: Output: statsdb1@testtable0
 POSTHOOK: query: analyze table testtable0 compute statistics for columns
 POSTHOOK: type: QUERY
 POSTHOOK: Input: statsdb1@testtable0
 #### A masked pattern was here ####
+POSTHOOK: Output: statsdb1@testtable0
 PREHOOK: query: describe formatted statsdb1.testtable0
 PREHOOK: type: DESCTABLE
 PREHOOK: Input: statsdb1@testtable0
@@ -2447,12 +2457,18 @@ PREHOOK: Input: statsdb1@testpart0
 PREHOOK: Input: statsdb1@testpart0@part=part1
 PREHOOK: Input: statsdb1@testpart0@part=part2
 #### A masked pattern was here ####
+PREHOOK: Output: statsdb1@testpart0
+PREHOOK: Output: statsdb1@testpart0@part=part1
+PREHOOK: Output: statsdb1@testpart0@part=part2
 POSTHOOK: query: analyze table testpart0 compute statistics for columns
 POSTHOOK: type: QUERY
 POSTHOOK: Input: statsdb1@testpart0
 POSTHOOK: Input: statsdb1@testpart0@part=part1
 POSTHOOK: Input: statsdb1@testpart0@part=part2
 #### A masked pattern was here ####
+POSTHOOK: Output: statsdb1@testpart0
+POSTHOOK: Output: statsdb1@testpart0@part=part1
+POSTHOOK: Output: statsdb1@testpart0@part=part2
 PREHOOK: query: describe formatted statsdb1.testpart0
 PREHOOK: type: DESCTABLE
 PREHOOK: Input: statsdb1@testpart0

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/alter_table_update_status.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/alter_table_update_status.q.out b/ql/src/test/results/clientpositive/alter_table_update_status.q.out
index 899ebbd..c6140be 100644
--- a/ql/src/test/results/clientpositive/alter_table_update_status.q.out
+++ b/ql/src/test/results/clientpositive/alter_table_update_status.q.out
@@ -35,10 +35,12 @@ POSTHOOK: Output: default@src_stat_int
 PREHOOK: query: ANALYZE TABLE src_stat COMPUTE STATISTICS for columns key
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src_stat
+PREHOOK: Output: default@src_stat
 #### A masked pattern was here ####
 POSTHOOK: query: ANALYZE TABLE src_stat COMPUTE STATISTICS for columns key
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src_stat
+POSTHOOK: Output: default@src_stat
 #### A masked pattern was here ####
 PREHOOK: query: describe formatted src_stat key
 PREHOOK: type: DESCTABLE
@@ -108,10 +110,12 @@ COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true
 PREHOOK: query: ANALYZE TABLE src_stat_int COMPUTE STATISTICS for columns key
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src_stat_int
+PREHOOK: Output: default@src_stat_int
 #### A masked pattern was here ####
 POSTHOOK: query: ANALYZE TABLE src_stat_int COMPUTE STATISTICS for columns key
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src_stat_int
+POSTHOOK: Output: default@src_stat_int
 #### A masked pattern was here ####
 PREHOOK: query: describe formatted src_stat_int key
 PREHOOK: type: DESCTABLE
@@ -131,7 +135,7 @@ num_trues
 num_falses          	                    	 	 	 	 	 	 	 	 	 	 
 bitVector           	HL                  	 	 	 	 	 	 	 	 	 	 
 comment             	from deserializer   	 	 	 	 	 	 	 	 	 	 
-COLUMN_STATS_ACCURATE	{\"COLUMN_STATS\":{\"key\":\"true\"}}	 	 	 	 	 	 	 	 	 	 
+COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\"}}	 	 	 	 	 	 	 	 	 	 
 PREHOOK: query: ALTER TABLE src_stat_int UPDATE STATISTICS for column key SET ('numDVs'='2222','lowValue'='333.22','highValue'='22.22')
 PREHOOK: type: ALTERTABLE_UPDATETABLESTATS
 POSTHOOK: query: ALTER TABLE src_stat_int UPDATE STATISTICS for column key SET ('numDVs'='2222','lowValue'='333.22','highValue'='22.22')
@@ -154,7 +158,7 @@ num_trues
 num_falses          	                    	 	 	 	 	 	 	 	 	 	 
 bitVector           	HL                  	 	 	 	 	 	 	 	 	 	 
 comment             	from deserializer   	 	 	 	 	 	 	 	 	 	 
-COLUMN_STATS_ACCURATE	{\"COLUMN_STATS\":{\"key\":\"true\"}}	 	 	 	 	 	 	 	 	 	 
+COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\"}}	 	 	 	 	 	 	 	 	 	 
 PREHOOK: query: create database if not exists dummydb
 PREHOOK: type: CREATEDATABASE
 PREHOOK: Output: database:dummydb

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/alter_table_update_status_disable_bitvector.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/alter_table_update_status_disable_bitvector.q.out b/ql/src/test/results/clientpositive/alter_table_update_status_disable_bitvector.q.out
index 8e4ca0a..5ff136d 100644
--- a/ql/src/test/results/clientpositive/alter_table_update_status_disable_bitvector.q.out
+++ b/ql/src/test/results/clientpositive/alter_table_update_status_disable_bitvector.q.out
@@ -35,10 +35,12 @@ POSTHOOK: Output: default@src_stat_int
 PREHOOK: query: ANALYZE TABLE src_stat COMPUTE STATISTICS for columns key
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src_stat
+PREHOOK: Output: default@src_stat
 #### A masked pattern was here ####
 POSTHOOK: query: ANALYZE TABLE src_stat COMPUTE STATISTICS for columns key
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src_stat
+POSTHOOK: Output: default@src_stat
 #### A masked pattern was here ####
 PREHOOK: query: describe formatted src_stat key
 PREHOOK: type: DESCTABLE
@@ -108,10 +110,12 @@ COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true
 PREHOOK: query: ANALYZE TABLE src_stat_int COMPUTE STATISTICS for columns key
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src_stat_int
+PREHOOK: Output: default@src_stat_int
 #### A masked pattern was here ####
 POSTHOOK: query: ANALYZE TABLE src_stat_int COMPUTE STATISTICS for columns key
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src_stat_int
+POSTHOOK: Output: default@src_stat_int
 #### A masked pattern was here ####
 PREHOOK: query: describe formatted src_stat_int key
 PREHOOK: type: DESCTABLE
@@ -131,7 +135,7 @@ num_trues
 num_falses          	                    	 	 	 	 	 	 	 	 	 	 
 bitVector           	                    	 	 	 	 	 	 	 	 	 	 
 comment             	from deserializer   	 	 	 	 	 	 	 	 	 	 
-COLUMN_STATS_ACCURATE	{\"COLUMN_STATS\":{\"key\":\"true\"}}	 	 	 	 	 	 	 	 	 	 
+COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\"}}	 	 	 	 	 	 	 	 	 	 
 PREHOOK: query: ALTER TABLE src_stat_int UPDATE STATISTICS for column key SET ('numDVs'='2222','lowValue'='333.22','highValue'='22.22')
 PREHOOK: type: ALTERTABLE_UPDATETABLESTATS
 POSTHOOK: query: ALTER TABLE src_stat_int UPDATE STATISTICS for column key SET ('numDVs'='2222','lowValue'='333.22','highValue'='22.22')
@@ -154,7 +158,7 @@ num_trues
 num_falses          	                    	 	 	 	 	 	 	 	 	 	 
 bitVector           	                    	 	 	 	 	 	 	 	 	 	 
 comment             	from deserializer   	 	 	 	 	 	 	 	 	 	 
-COLUMN_STATS_ACCURATE	{\"COLUMN_STATS\":{\"key\":\"true\"}}	 	 	 	 	 	 	 	 	 	 
+COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\"}}	 	 	 	 	 	 	 	 	 	 
 PREHOOK: query: create database if not exists dummydb
 PREHOOK: type: CREATEDATABASE
 PREHOOK: Output: database:dummydb

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/analyze_tbl_date.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/analyze_tbl_date.q.out b/ql/src/test/results/clientpositive/analyze_tbl_date.q.out
index 6b2b85b..e70c6b8 100644
--- a/ql/src/test/results/clientpositive/analyze_tbl_date.q.out
+++ b/ql/src/test/results/clientpositive/analyze_tbl_date.q.out
@@ -16,10 +16,12 @@ POSTHOOK: Lineage: test_table.d EXPRESSION [(values__tmp__table__1)values__tmp__
 PREHOOK: query: analyze table test_table compute statistics for columns
 PREHOOK: type: QUERY
 PREHOOK: Input: default@test_table
+PREHOOK: Output: default@test_table
 #### A masked pattern was here ####
 POSTHOOK: query: analyze table test_table compute statistics for columns
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@test_table
+POSTHOOK: Output: default@test_table
 #### A masked pattern was here ####
 PREHOOK: query: describe formatted test_table
 PREHOOK: type: DESCTABLE

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/analyze_tbl_part.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/analyze_tbl_part.q.out b/ql/src/test/results/clientpositive/analyze_tbl_part.q.out
index 0f38045..5899ab2 100644
--- a/ql/src/test/results/clientpositive/analyze_tbl_part.q.out
+++ b/ql/src/test/results/clientpositive/analyze_tbl_part.q.out
@@ -35,12 +35,18 @@ PREHOOK: type: QUERY
 PREHOOK: Input: default@src_stat_part
 PREHOOK: Input: default@src_stat_part@partitionid=1
 PREHOOK: Input: default@src_stat_part@partitionid=2
+PREHOOK: Output: default@src_stat_part
+PREHOOK: Output: default@src_stat_part@partitionid=1
+PREHOOK: Output: default@src_stat_part@partitionid=2
 #### A masked pattern was here ####
 POSTHOOK: query: ANALYZE TABLE src_stat_part partition (partitionId) COMPUTE STATISTICS for columns key
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src_stat_part
 POSTHOOK: Input: default@src_stat_part@partitionid=1
 POSTHOOK: Input: default@src_stat_part@partitionid=2
+POSTHOOK: Output: default@src_stat_part
+POSTHOOK: Output: default@src_stat_part@partitionid=1
+POSTHOOK: Output: default@src_stat_part@partitionid=2
 #### A masked pattern was here ####
 PREHOOK: query: describe formatted src_stat_part PARTITION(partitionId=1) key
 PREHOOK: type: DESCTABLE
@@ -65,12 +71,18 @@ PREHOOK: type: QUERY
 PREHOOK: Input: default@src_stat_part
 PREHOOK: Input: default@src_stat_part@partitionid=1
 PREHOOK: Input: default@src_stat_part@partitionid=2
+PREHOOK: Output: default@src_stat_part
+PREHOOK: Output: default@src_stat_part@partitionid=1
+PREHOOK: Output: default@src_stat_part@partitionid=2
 #### A masked pattern was here ####
 POSTHOOK: query: ANALYZE TABLE src_stat_part partition (partitionId) COMPUTE STATISTICS for columns key, value
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src_stat_part
 POSTHOOK: Input: default@src_stat_part@partitionid=1
 POSTHOOK: Input: default@src_stat_part@partitionid=2
+POSTHOOK: Output: default@src_stat_part
+POSTHOOK: Output: default@src_stat_part@partitionid=1
+POSTHOOK: Output: default@src_stat_part@partitionid=2
 #### A masked pattern was here ####
 PREHOOK: query: describe formatted src_stat_part PARTITION(partitionId=1) key
 PREHOOK: type: DESCTABLE
@@ -144,19 +156,27 @@ PREHOOK: query: ANALYZE TABLE src_stat_string_part partition (partitionName="p\'
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src_stat_string_part
 PREHOOK: Input: default@src_stat_string_part@partitionname=p%271
+PREHOOK: Output: default@src_stat_string_part
+PREHOOK: Output: default@src_stat_string_part@partitionname=p%271
 #### A masked pattern was here ####
 POSTHOOK: query: ANALYZE TABLE src_stat_string_part partition (partitionName="p\'1") COMPUTE STATISTICS for columns key, value
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src_stat_string_part
 POSTHOOK: Input: default@src_stat_string_part@partitionname=p%271
+POSTHOOK: Output: default@src_stat_string_part
+POSTHOOK: Output: default@src_stat_string_part@partitionname=p%271
 #### A masked pattern was here ####
 PREHOOK: query: ANALYZE TABLE src_stat_string_part partition (partitionName="p\"1") COMPUTE STATISTICS for columns key, value
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src_stat_string_part
 PREHOOK: Input: default@src_stat_string_part@partitionname=p%221
+PREHOOK: Output: default@src_stat_string_part
+PREHOOK: Output: default@src_stat_string_part@partitionname=p%221
 #### A masked pattern was here ####
 POSTHOOK: query: ANALYZE TABLE src_stat_string_part partition (partitionName="p\"1") COMPUTE STATISTICS for columns key, value
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src_stat_string_part
 POSTHOOK: Input: default@src_stat_string_part@partitionname=p%221
+POSTHOOK: Output: default@src_stat_string_part
+POSTHOOK: Output: default@src_stat_string_part@partitionname=p%221
 #### A masked pattern was here ####

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/annotate_stats_deep_filters.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/annotate_stats_deep_filters.q.out b/ql/src/test/results/clientpositive/annotate_stats_deep_filters.q.out
index da59211..ab8c22b 100644
--- a/ql/src/test/results/clientpositive/annotate_stats_deep_filters.q.out
+++ b/ql/src/test/results/clientpositive/annotate_stats_deep_filters.q.out
@@ -59,10 +59,12 @@ POSTHOOK: Output: default@over1k
 PREHOOK: query: analyze table over1k compute statistics for columns
 PREHOOK: type: QUERY
 PREHOOK: Input: default@over1k
+PREHOOK: Output: default@over1k
 #### A masked pattern was here ####
 POSTHOOK: query: analyze table over1k compute statistics for columns
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@over1k
+POSTHOOK: Output: default@over1k
 #### A masked pattern was here ####
 PREHOOK: query: explain select count(*) from over1k where (
 (t=1 and si=2)

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/annotate_stats_filter.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/annotate_stats_filter.q.out b/ql/src/test/results/clientpositive/annotate_stats_filter.q.out
index defb088..20f870e 100644
--- a/ql/src/test/results/clientpositive/annotate_stats_filter.q.out
+++ b/ql/src/test/results/clientpositive/annotate_stats_filter.q.out
@@ -112,10 +112,12 @@ STAGE PLANS:
 PREHOOK: query: analyze table loc_orc compute statistics for columns state,locid,zip,year
 PREHOOK: type: QUERY
 PREHOOK: Input: default@loc_orc
+PREHOOK: Output: default@loc_orc
 #### A masked pattern was here ####
 POSTHOOK: query: analyze table loc_orc compute statistics for columns state,locid,zip,year
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@loc_orc
+POSTHOOK: Output: default@loc_orc
 #### A masked pattern was here ####
 PREHOOK: query: explain select * from loc_orc where state='OH'
 PREHOOK: type: QUERY

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/annotate_stats_groupby.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/annotate_stats_groupby.q.out b/ql/src/test/results/clientpositive/annotate_stats_groupby.q.out
index cd4b0ad..4e67841 100644
--- a/ql/src/test/results/clientpositive/annotate_stats_groupby.q.out
+++ b/ql/src/test/results/clientpositive/annotate_stats_groupby.q.out
@@ -76,10 +76,12 @@ STAGE PLANS:
 PREHOOK: query: analyze table loc_orc compute statistics for columns state
 PREHOOK: type: QUERY
 PREHOOK: Input: default@loc_orc
+PREHOOK: Output: default@loc_orc
 #### A masked pattern was here ####
 POSTHOOK: query: analyze table loc_orc compute statistics for columns state
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@loc_orc
+POSTHOOK: Output: default@loc_orc
 #### A masked pattern was here ####
 PREHOOK: query: explain select a, c, min(b)
 from ( select state as a, locid as b, count(*) as c
@@ -177,10 +179,12 @@ STAGE PLANS:
 PREHOOK: query: analyze table loc_orc compute statistics for columns state,locid,year
 PREHOOK: type: QUERY
 PREHOOK: Input: default@loc_orc
+PREHOOK: Output: default@loc_orc
 #### A masked pattern was here ####
 POSTHOOK: query: analyze table loc_orc compute statistics for columns state,locid,year
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@loc_orc
+POSTHOOK: Output: default@loc_orc
 #### A masked pattern was here ####
 PREHOOK: query: explain select year from loc_orc group by year
 PREHOOK: type: QUERY

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/annotate_stats_groupby2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/annotate_stats_groupby2.q.out b/ql/src/test/results/clientpositive/annotate_stats_groupby2.q.out
index 4986879..ffcb20f 100644
--- a/ql/src/test/results/clientpositive/annotate_stats_groupby2.q.out
+++ b/ql/src/test/results/clientpositive/annotate_stats_groupby2.q.out
@@ -29,10 +29,12 @@ POSTHOOK: Output: default@location
 PREHOOK: query: analyze table location compute statistics for columns state, country
 PREHOOK: type: QUERY
 PREHOOK: Input: default@location
+PREHOOK: Output: default@location
 #### A masked pattern was here ####
 POSTHOOK: query: analyze table location compute statistics for columns state, country
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@location
+POSTHOOK: Output: default@location
 #### A masked pattern was here ####
 PREHOOK: query: explain select state, country from location group by state, country
 PREHOOK: type: QUERY

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/annotate_stats_join.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/annotate_stats_join.q.out b/ql/src/test/results/clientpositive/annotate_stats_join.q.out
index a7e73a0..9173e7a 100644
--- a/ql/src/test/results/clientpositive/annotate_stats_join.q.out
+++ b/ql/src/test/results/clientpositive/annotate_stats_join.q.out
@@ -97,26 +97,32 @@ POSTHOOK: Output: default@loc
 PREHOOK: query: analyze table emp compute statistics for columns lastname,deptid,locid
 PREHOOK: type: QUERY
 PREHOOK: Input: default@emp
+PREHOOK: Output: default@emp
 #### A masked pattern was here ####
 POSTHOOK: query: analyze table emp compute statistics for columns lastname,deptid,locid
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@emp
+POSTHOOK: Output: default@emp
 #### A masked pattern was here ####
 PREHOOK: query: analyze table dept compute statistics for columns deptname,deptid
 PREHOOK: type: QUERY
 PREHOOK: Input: default@dept
+PREHOOK: Output: default@dept
 #### A masked pattern was here ####
 POSTHOOK: query: analyze table dept compute statistics for columns deptname,deptid
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@dept
+POSTHOOK: Output: default@dept
 #### A masked pattern was here ####
 PREHOOK: query: analyze table loc compute statistics for columns state,locid,zip,year
 PREHOOK: type: QUERY
 PREHOOK: Input: default@loc
+PREHOOK: Output: default@loc
 #### A masked pattern was here ####
 POSTHOOK: query: analyze table loc compute statistics for columns state,locid,zip,year
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@loc
+POSTHOOK: Output: default@loc
 #### A masked pattern was here ####
 PREHOOK: query: explain select * from emp e join dept d on (e.deptid = d.deptid)
 PREHOOK: type: QUERY

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/annotate_stats_join_pkfk.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/annotate_stats_join_pkfk.q.out b/ql/src/test/results/clientpositive/annotate_stats_join_pkfk.q.out
index a73e34d..bec98d0 100644
--- a/ql/src/test/results/clientpositive/annotate_stats_join_pkfk.q.out
+++ b/ql/src/test/results/clientpositive/annotate_stats_join_pkfk.q.out
@@ -297,10 +297,12 @@ POSTHOOK: Output: default@store
 PREHOOK: query: analyze table store compute statistics for columns s_store_sk, s_floor_space
 PREHOOK: type: QUERY
 PREHOOK: Input: default@store
+PREHOOK: Output: default@store
 #### A masked pattern was here ####
 POSTHOOK: query: analyze table store compute statistics for columns s_store_sk, s_floor_space
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@store
+POSTHOOK: Output: default@store
 #### A masked pattern was here ####
 PREHOOK: query: analyze table store_bigint compute statistics
 PREHOOK: type: QUERY
@@ -313,10 +315,12 @@ POSTHOOK: Output: default@store_bigint
 PREHOOK: query: analyze table store_bigint compute statistics for columns s_store_sk, s_floor_space
 PREHOOK: type: QUERY
 PREHOOK: Input: default@store_bigint
+PREHOOK: Output: default@store_bigint
 #### A masked pattern was here ####
 POSTHOOK: query: analyze table store_bigint compute statistics for columns s_store_sk, s_floor_space
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@store_bigint
+POSTHOOK: Output: default@store_bigint
 #### A masked pattern was here ####
 PREHOOK: query: analyze table store_sales compute statistics
 PREHOOK: type: QUERY
@@ -329,10 +333,12 @@ POSTHOOK: Output: default@store_sales
 PREHOOK: query: analyze table store_sales compute statistics for columns ss_store_sk, ss_addr_sk, ss_quantity
 PREHOOK: type: QUERY
 PREHOOK: Input: default@store_sales
+PREHOOK: Output: default@store_sales
 #### A masked pattern was here ####
 POSTHOOK: query: analyze table store_sales compute statistics for columns ss_store_sk, ss_addr_sk, ss_quantity
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@store_sales
+POSTHOOK: Output: default@store_sales
 #### A masked pattern was here ####
 PREHOOK: query: analyze table customer_address compute statistics
 PREHOOK: type: QUERY
@@ -345,10 +351,12 @@ POSTHOOK: Output: default@customer_address
 PREHOOK: query: analyze table customer_address compute statistics for columns ca_address_sk
 PREHOOK: type: QUERY
 PREHOOK: Input: default@customer_address
+PREHOOK: Output: default@customer_address
 #### A masked pattern was here ####
 POSTHOOK: query: analyze table customer_address compute statistics for columns ca_address_sk
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@customer_address
+POSTHOOK: Output: default@customer_address
 #### A masked pattern was here ####
 PREHOOK: query: explain select s.s_store_sk from store s join store_sales ss on (s.s_store_sk = ss.ss_store_sk)
 PREHOOK: type: QUERY

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/annotate_stats_limit.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/annotate_stats_limit.q.out b/ql/src/test/results/clientpositive/annotate_stats_limit.q.out
index ea181cb..5139db4 100644
--- a/ql/src/test/results/clientpositive/annotate_stats_limit.q.out
+++ b/ql/src/test/results/clientpositive/annotate_stats_limit.q.out
@@ -55,10 +55,12 @@ POSTHOOK: Lineage: loc_orc.zip SIMPLE [(loc_staging)loc_staging.FieldSchema(name
 PREHOOK: query: analyze table loc_orc compute statistics for columns state, locid, zip, year
 PREHOOK: type: QUERY
 PREHOOK: Input: default@loc_orc
+PREHOOK: Output: default@loc_orc
 #### A masked pattern was here ####
 POSTHOOK: query: analyze table loc_orc compute statistics for columns state, locid, zip, year
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@loc_orc
+POSTHOOK: Output: default@loc_orc
 #### A masked pattern was here ####
 PREHOOK: query: explain select * from loc_orc
 PREHOOK: type: QUERY

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/annotate_stats_part.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/annotate_stats_part.q.out b/ql/src/test/results/clientpositive/annotate_stats_part.q.out
index def4d4f..3a94a6a 100644
--- a/ql/src/test/results/clientpositive/annotate_stats_part.q.out
+++ b/ql/src/test/results/clientpositive/annotate_stats_part.q.out
@@ -273,11 +273,15 @@ PREHOOK: query: analyze table loc_orc partition(year='2001') compute statistics
 PREHOOK: type: QUERY
 PREHOOK: Input: default@loc_orc
 PREHOOK: Input: default@loc_orc@year=2001
+PREHOOK: Output: default@loc_orc
+PREHOOK: Output: default@loc_orc@year=2001
 #### A masked pattern was here ####
 POSTHOOK: query: analyze table loc_orc partition(year='2001') compute statistics for columns state,locid
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@loc_orc
 POSTHOOK: Input: default@loc_orc@year=2001
+POSTHOOK: Output: default@loc_orc
+POSTHOOK: Output: default@loc_orc@year=2001
 #### A masked pattern was here ####
 PREHOOK: query: explain select zip from loc_orc
 PREHOOK: type: QUERY

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/annotate_stats_select.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/annotate_stats_select.q.out b/ql/src/test/results/clientpositive/annotate_stats_select.q.out
index e10ce1a..9aaa6f6 100644
--- a/ql/src/test/results/clientpositive/annotate_stats_select.q.out
+++ b/ql/src/test/results/clientpositive/annotate_stats_select.q.out
@@ -113,10 +113,12 @@ STAGE PLANS:
 PREHOOK: query: analyze table alltypes_orc compute statistics for columns bo1, ti1, si1, i1, bi1, f1, d1, s1, vc1
 PREHOOK: type: QUERY
 PREHOOK: Input: default@alltypes_orc
+PREHOOK: Output: default@alltypes_orc
 #### A masked pattern was here ####
 POSTHOOK: query: analyze table alltypes_orc compute statistics for columns bo1, ti1, si1, i1, bi1, f1, d1, s1, vc1
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@alltypes_orc
+POSTHOOK: Output: default@alltypes_orc
 #### A masked pattern was here ####
 PREHOOK: query: explain select * from alltypes_orc
 PREHOOK: type: QUERY

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/annotate_stats_table.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/annotate_stats_table.q.out b/ql/src/test/results/clientpositive/annotate_stats_table.q.out
index ff7b403..f61e8d8 100644
--- a/ql/src/test/results/clientpositive/annotate_stats_table.q.out
+++ b/ql/src/test/results/clientpositive/annotate_stats_table.q.out
@@ -120,10 +120,12 @@ STAGE PLANS:
 PREHOOK: query: analyze table emp_orc compute statistics for columns deptid
 PREHOOK: type: QUERY
 PREHOOK: Input: default@emp_orc
+PREHOOK: Output: default@emp_orc
 #### A masked pattern was here ####
 POSTHOOK: query: analyze table emp_orc compute statistics for columns deptid
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@emp_orc
+POSTHOOK: Output: default@emp_orc
 #### A masked pattern was here ####
 PREHOOK: query: explain select * from emp_orc
 PREHOOK: type: QUERY
@@ -170,10 +172,12 @@ STAGE PLANS:
 PREHOOK: query: analyze table emp_orc compute statistics for columns lastname,deptid
 PREHOOK: type: QUERY
 PREHOOK: Input: default@emp_orc
+PREHOOK: Output: default@emp_orc
 #### A masked pattern was here ####
 POSTHOOK: query: analyze table emp_orc compute statistics for columns lastname,deptid
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@emp_orc
+POSTHOOK: Output: default@emp_orc
 #### A masked pattern was here ####
 PREHOOK: query: explain select * from emp_orc
 PREHOOK: type: QUERY

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/annotate_stats_union.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/annotate_stats_union.q.out b/ql/src/test/results/clientpositive/annotate_stats_union.q.out
index 059f261..3b4b169 100644
--- a/ql/src/test/results/clientpositive/annotate_stats_union.q.out
+++ b/ql/src/test/results/clientpositive/annotate_stats_union.q.out
@@ -55,10 +55,12 @@ POSTHOOK: Lineage: loc_orc.zip SIMPLE [(loc_staging)loc_staging.FieldSchema(name
 PREHOOK: query: analyze table loc_orc compute statistics for columns state,locid,zip,year
 PREHOOK: type: QUERY
 PREHOOK: Input: default@loc_orc
+PREHOOK: Output: default@loc_orc
 #### A masked pattern was here ####
 POSTHOOK: query: analyze table loc_orc compute statistics for columns state,locid,zip,year
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@loc_orc
+POSTHOOK: Output: default@loc_orc
 #### A masked pattern was here ####
 PREHOOK: query: explain select state from loc_orc
 PREHOOK: type: QUERY
@@ -282,18 +284,22 @@ PREHOOK: query: analyze table loc_staging compute statistics for columns state,l
 PREHOOK: type: QUERY
 PREHOOK: Input: test@loc_staging
 #### A masked pattern was here ####
+PREHOOK: Output: test@loc_staging
 POSTHOOK: query: analyze table loc_staging compute statistics for columns state,locid,zip,year
 POSTHOOK: type: QUERY
 POSTHOOK: Input: test@loc_staging
 #### A masked pattern was here ####
+POSTHOOK: Output: test@loc_staging
 PREHOOK: query: analyze table loc_orc compute statistics for columns state,locid,zip,year
 PREHOOK: type: QUERY
 PREHOOK: Input: test@loc_orc
 #### A masked pattern was here ####
+PREHOOK: Output: test@loc_orc
 POSTHOOK: query: analyze table loc_orc compute statistics for columns state,locid,zip,year
 POSTHOOK: type: QUERY
 POSTHOOK: Input: test@loc_orc
 #### A masked pattern was here ####
+POSTHOOK: Output: test@loc_orc
 PREHOOK: query: explain select * from (select state from default.loc_orc union all select state from test.loc_orc) temp
 PREHOOK: type: QUERY
 POSTHOOK: query: explain select * from (select state from default.loc_orc union all select state from test.loc_orc) temp


[10/22] hive git commit: HIVE-16827 : Merge stats task and column stats task into a single task (Zoltan Haindrich via Ashutosh Chauhan)

Posted by ha...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/join33.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/join33.q.out b/ql/src/test/results/clientpositive/join33.q.out
index 176989c..30a5ba9 100644
--- a/ql/src/test/results/clientpositive/join33.q.out
+++ b/ql/src/test/results/clientpositive/join33.q.out
@@ -366,7 +366,8 @@ STAGE PLANS:
               name: default.dest_j1
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
 PREHOOK: query: INSERT OVERWRITE TABLE dest_j1

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/join34.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/join34.q.out b/ql/src/test/results/clientpositive/join34.q.out
index 67599bc..072a20b 100644
--- a/ql/src/test/results/clientpositive/join34.q.out
+++ b/ql/src/test/results/clientpositive/join34.q.out
@@ -309,7 +309,8 @@ STAGE PLANS:
               name: default.dest_j1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
 PREHOOK: query: INSERT OVERWRITE TABLE dest_j1

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/join35.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/join35.q.out b/ql/src/test/results/clientpositive/join35.q.out
index ade6646..f8fd438 100644
--- a/ql/src/test/results/clientpositive/join35.q.out
+++ b/ql/src/test/results/clientpositive/join35.q.out
@@ -393,7 +393,8 @@ STAGE PLANS:
               name: default.dest_j1
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
   Stage: Stage-4

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/join36.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/join36.q.out b/ql/src/test/results/clientpositive/join36.q.out
index 43a091f..0fd4d91 100644
--- a/ql/src/test/results/clientpositive/join36.q.out
+++ b/ql/src/test/results/clientpositive/join36.q.out
@@ -133,7 +133,8 @@ STAGE PLANS:
               name: default.dest_j1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: INSERT OVERWRITE TABLE dest_j1 
 SELECT /*+ MAPJOIN(x) */ x.key, x.cnt, y.cnt

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/join37.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/join37.q.out b/ql/src/test/results/clientpositive/join37.q.out
index b0a2ee3..c8bef83 100644
--- a/ql/src/test/results/clientpositive/join37.q.out
+++ b/ql/src/test/results/clientpositive/join37.q.out
@@ -93,7 +93,8 @@ STAGE PLANS:
               name: default.dest_j1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: INSERT OVERWRITE TABLE dest_j1 
 SELECT /*+ MAPJOIN(X) */ x.key, x.value, y.value

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/join39.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/join39.q.out b/ql/src/test/results/clientpositive/join39.q.out
index c656762..93ebdb8 100644
--- a/ql/src/test/results/clientpositive/join39.q.out
+++ b/ql/src/test/results/clientpositive/join39.q.out
@@ -86,7 +86,8 @@ STAGE PLANS:
               name: default.dest_j1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: INSERT OVERWRITE TABLE dest_j1
 SELECT /*+ MAPJOIN(y) */ x.key, x.value, y.key, y.value

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/join4.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/join4.q.out b/ql/src/test/results/clientpositive/join4.q.out
index f11d995..d4e83c4 100644
--- a/ql/src/test/results/clientpositive/join4.q.out
+++ b/ql/src/test/results/clientpositive/join4.q.out
@@ -110,7 +110,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM (
  FROM 

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/join5.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/join5.q.out b/ql/src/test/results/clientpositive/join5.q.out
index 99cfe1a..8cfe402 100644
--- a/ql/src/test/results/clientpositive/join5.q.out
+++ b/ql/src/test/results/clientpositive/join5.q.out
@@ -110,7 +110,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM (
  FROM 

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/join6.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/join6.q.out b/ql/src/test/results/clientpositive/join6.q.out
index ce2ec8b..8d6578a 100644
--- a/ql/src/test/results/clientpositive/join6.q.out
+++ b/ql/src/test/results/clientpositive/join6.q.out
@@ -110,7 +110,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM (
  FROM 

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/join7.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/join7.q.out b/ql/src/test/results/clientpositive/join7.q.out
index d8adc39..f1af54a 100644
--- a/ql/src/test/results/clientpositive/join7.q.out
+++ b/ql/src/test/results/clientpositive/join7.q.out
@@ -138,7 +138,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM (
  FROM 

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/join8.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/join8.q.out b/ql/src/test/results/clientpositive/join8.q.out
index 3784824..ce5a5d4 100644
--- a/ql/src/test/results/clientpositive/join8.q.out
+++ b/ql/src/test/results/clientpositive/join8.q.out
@@ -113,7 +113,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM (
  FROM 

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/join9.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/join9.q.out b/ql/src/test/results/clientpositive/join9.q.out
index a96f341..58f8d7d 100644
--- a/ql/src/test/results/clientpositive/join9.q.out
+++ b/ql/src/test/results/clientpositive/join9.q.out
@@ -240,7 +240,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
 PREHOOK: query: FROM srcpart src1 JOIN src src2 ON (src1.key = src2.key)

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/join_map_ppr.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/join_map_ppr.q.out b/ql/src/test/results/clientpositive/join_map_ppr.q.out
index a4d4140..6eb9889 100644
--- a/ql/src/test/results/clientpositive/join_map_ppr.q.out
+++ b/ql/src/test/results/clientpositive/join_map_ppr.q.out
@@ -225,7 +225,8 @@ STAGE PLANS:
               name: default.dest_j1
 
   Stage: Stage-4
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
   Stage: Stage-5
@@ -803,7 +804,8 @@ STAGE PLANS:
               name: default.dest_j1
 
   Stage: Stage-4
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
   Stage: Stage-5

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/limit_pushdown_negative.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/limit_pushdown_negative.q.out b/ql/src/test/results/clientpositive/limit_pushdown_negative.q.out
index eeb47f7..7d0abfd 100644
--- a/ql/src/test/results/clientpositive/limit_pushdown_negative.q.out
+++ b/ql/src/test/results/clientpositive/limit_pushdown_negative.q.out
@@ -296,7 +296,8 @@ STAGE PLANS:
               name: default.dest_2
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-4
     Map Reduce
@@ -339,5 +340,6 @@ STAGE PLANS:
               name: default.dest_3
 
   Stage: Stage-5
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/lineage1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/lineage1.q.out b/ql/src/test/results/clientpositive/lineage1.q.out
index 6c8a22f..7d6af3e 100644
--- a/ql/src/test/results/clientpositive/lineage1.q.out
+++ b/ql/src/test/results/clientpositive/lineage1.q.out
@@ -147,7 +147,8 @@ STAGE PLANS:
               name: default.dest_l1
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-4
     Map Reduce

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/list_bucket_dml_1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/list_bucket_dml_1.q.out b/ql/src/test/results/clientpositive/list_bucket_dml_1.q.out
index a664db3..2eca946 100644
--- a/ql/src/test/results/clientpositive/list_bucket_dml_1.q.out
+++ b/ql/src/test/results/clientpositive/list_bucket_dml_1.q.out
@@ -197,7 +197,8 @@ STAGE PLANS:
               name: default.list_bucketing_dynamic_part
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
 PREHOOK: query: insert overwrite table list_bucketing_dynamic_part partition (ds='2008-04-08', hr) select key, value, hr from srcpart where ds='2008-04-08'

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/list_bucket_dml_11.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/list_bucket_dml_11.q.out b/ql/src/test/results/clientpositive/list_bucket_dml_11.q.out
index bf987e1..6b34745 100644
--- a/ql/src/test/results/clientpositive/list_bucket_dml_11.q.out
+++ b/ql/src/test/results/clientpositive/list_bucket_dml_11.q.out
@@ -150,7 +150,8 @@ STAGE PLANS:
               name: default.list_bucketing_static_part
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
 PREHOOK: query: insert overwrite table list_bucketing_static_part partition (ds = '2008-04-08', hr = '11')

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/list_bucket_dml_12.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/list_bucket_dml_12.q.out b/ql/src/test/results/clientpositive/list_bucket_dml_12.q.out
index 3c80cef..a0bc3e9 100644
--- a/ql/src/test/results/clientpositive/list_bucket_dml_12.q.out
+++ b/ql/src/test/results/clientpositive/list_bucket_dml_12.q.out
@@ -150,7 +150,8 @@ STAGE PLANS:
               name: default.list_bucketing_mul_col
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
 PREHOOK: query: insert overwrite table list_bucketing_mul_col partition (ds = '2008-04-08', hr = '11')

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/list_bucket_dml_13.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/list_bucket_dml_13.q.out b/ql/src/test/results/clientpositive/list_bucket_dml_13.q.out
index 379a730..df8717b 100644
--- a/ql/src/test/results/clientpositive/list_bucket_dml_13.q.out
+++ b/ql/src/test/results/clientpositive/list_bucket_dml_13.q.out
@@ -150,7 +150,8 @@ STAGE PLANS:
               name: default.list_bucketing_mul_col
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
 PREHOOK: query: insert overwrite table list_bucketing_mul_col partition (ds = '2008-04-08', hr = '2013-01-23+18:00:99')

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/list_bucket_dml_14.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/list_bucket_dml_14.q.out b/ql/src/test/results/clientpositive/list_bucket_dml_14.q.out
index b6b829a..6b48f3d 100644
--- a/ql/src/test/results/clientpositive/list_bucket_dml_14.q.out
+++ b/ql/src/test/results/clientpositive/list_bucket_dml_14.q.out
@@ -146,7 +146,8 @@ STAGE PLANS:
               name: default.list_bucketing
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
 PREHOOK: query: insert overwrite table list_bucketing select * from src

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/list_bucket_dml_2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/list_bucket_dml_2.q.out b/ql/src/test/results/clientpositive/list_bucket_dml_2.q.out
index 8167730..512aeab 100644
--- a/ql/src/test/results/clientpositive/list_bucket_dml_2.q.out
+++ b/ql/src/test/results/clientpositive/list_bucket_dml_2.q.out
@@ -201,7 +201,8 @@ STAGE PLANS:
               name: default.list_bucketing_static_part
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
 PREHOOK: query: insert overwrite table list_bucketing_static_part partition (ds = '2008-04-08', hr = '11')

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/list_bucket_dml_3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/list_bucket_dml_3.q.out b/ql/src/test/results/clientpositive/list_bucket_dml_3.q.out
index aafdbb1..e86bf0a 100644
--- a/ql/src/test/results/clientpositive/list_bucket_dml_3.q.out
+++ b/ql/src/test/results/clientpositive/list_bucket_dml_3.q.out
@@ -191,7 +191,8 @@ STAGE PLANS:
               name: default.list_bucketing_static_part
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
 PREHOOK: query: insert overwrite table list_bucketing_static_part partition (ds='2008-04-08', hr='11') select key, value from srcpart where ds='2008-04-08'

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/list_bucket_dml_4.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/list_bucket_dml_4.q.out b/ql/src/test/results/clientpositive/list_bucket_dml_4.q.out
index 36bd666..7f90ac5 100644
--- a/ql/src/test/results/clientpositive/list_bucket_dml_4.q.out
+++ b/ql/src/test/results/clientpositive/list_bucket_dml_4.q.out
@@ -201,7 +201,8 @@ STAGE PLANS:
               name: default.list_bucketing_static_part
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
 PREHOOK: query: insert overwrite table list_bucketing_static_part partition (ds = '2008-04-08', hr = '11')
@@ -469,7 +470,8 @@ STAGE PLANS:
               name: default.list_bucketing_static_part
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
   Stage: Stage-3

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/list_bucket_dml_5.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/list_bucket_dml_5.q.out b/ql/src/test/results/clientpositive/list_bucket_dml_5.q.out
index 23ec5f4..64fdacb 100644
--- a/ql/src/test/results/clientpositive/list_bucket_dml_5.q.out
+++ b/ql/src/test/results/clientpositive/list_bucket_dml_5.q.out
@@ -197,7 +197,8 @@ STAGE PLANS:
               name: default.list_bucketing_dynamic_part
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
 PREHOOK: query: insert overwrite table list_bucketing_dynamic_part partition (ds='2008-04-08', hr) select key, value, hr from srcpart where ds='2008-04-08'

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/list_bucket_dml_6.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/list_bucket_dml_6.q.out b/ql/src/test/results/clientpositive/list_bucket_dml_6.q.out
index 1e01ca3..32dac42 100644
--- a/ql/src/test/results/clientpositive/list_bucket_dml_6.q.out
+++ b/ql/src/test/results/clientpositive/list_bucket_dml_6.q.out
@@ -201,7 +201,8 @@ STAGE PLANS:
               name: default.list_bucketing_dynamic_part
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
 PREHOOK: query: insert overwrite table list_bucketing_dynamic_part partition (ds = '2008-04-08', hr)
@@ -516,7 +517,8 @@ STAGE PLANS:
               name: default.list_bucketing_dynamic_part
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
   Stage: Stage-3

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/list_bucket_dml_7.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/list_bucket_dml_7.q.out b/ql/src/test/results/clientpositive/list_bucket_dml_7.q.out
index eea69ff..7c11d3f 100644
--- a/ql/src/test/results/clientpositive/list_bucket_dml_7.q.out
+++ b/ql/src/test/results/clientpositive/list_bucket_dml_7.q.out
@@ -201,7 +201,8 @@ STAGE PLANS:
               name: default.list_bucketing_dynamic_part
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
 PREHOOK: query: insert overwrite table list_bucketing_dynamic_part partition (ds = '2008-04-08', hr)
@@ -516,7 +517,8 @@ STAGE PLANS:
               name: default.list_bucketing_dynamic_part
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
   Stage: Stage-3

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/list_bucket_dml_8.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/list_bucket_dml_8.q.out b/ql/src/test/results/clientpositive/list_bucket_dml_8.q.out
index 0215f5d..d45be4e 100644
--- a/ql/src/test/results/clientpositive/list_bucket_dml_8.q.out
+++ b/ql/src/test/results/clientpositive/list_bucket_dml_8.q.out
@@ -201,7 +201,8 @@ STAGE PLANS:
               name: default.list_bucketing_dynamic_part
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
 PREHOOK: query: insert overwrite table list_bucketing_dynamic_part partition (ds = '2008-04-08', hr)

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/list_bucket_dml_9.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/list_bucket_dml_9.q.out b/ql/src/test/results/clientpositive/list_bucket_dml_9.q.out
index fdc7a3e..9df2130 100644
--- a/ql/src/test/results/clientpositive/list_bucket_dml_9.q.out
+++ b/ql/src/test/results/clientpositive/list_bucket_dml_9.q.out
@@ -201,7 +201,8 @@ STAGE PLANS:
               name: default.list_bucketing_static_part
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
 PREHOOK: query: insert overwrite table list_bucketing_static_part partition (ds = '2008-04-08', hr = '11')
@@ -469,7 +470,8 @@ STAGE PLANS:
               name: default.list_bucketing_static_part
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
   Stage: Stage-3

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/llap/acid_bucket_pruning.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/acid_bucket_pruning.q.out b/ql/src/test/results/clientpositive/llap/acid_bucket_pruning.q.out
index 77bea79..86a9bf5 100644
--- a/ql/src/test/results/clientpositive/llap/acid_bucket_pruning.q.out
+++ b/ql/src/test/results/clientpositive/llap/acid_bucket_pruning.q.out
@@ -86,6 +86,7 @@ STAGE PLANS:
                   input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
                   output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
                   properties:
+                    COLUMN_STATS_ACCURATE {}
                     bucket_count 16
                     bucket_field_name a
                     column.name.delimiter ,
@@ -109,6 +110,7 @@ STAGE PLANS:
                     input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
                     output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
                     properties:
+                      COLUMN_STATS_ACCURATE {}
                       bucket_count 16
                       bucket_field_name a
                       column.name.delimiter ,

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/llap/acid_no_buckets.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/acid_no_buckets.q.out b/ql/src/test/results/clientpositive/llap/acid_no_buckets.q.out
index 388ff67..9b83fad 100644
--- a/ql/src/test/results/clientpositive/llap/acid_no_buckets.q.out
+++ b/ql/src/test/results/clientpositive/llap/acid_no_buckets.q.out
@@ -169,7 +169,8 @@ STAGE PLANS:
           Write Type: UPDATE
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: update srcpart_acid set value = concat(value, 'updated') where cast(key as integer) in(413,43) and hr='11'
 PREHOOK: type: QUERY
@@ -348,7 +349,8 @@ STAGE PLANS:
           Write Type: DELETE
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: delete from srcpart_acid where key in( '1001', '213', '43')
 PREHOOK: type: QUERY
@@ -665,7 +667,8 @@ STAGE PLANS:
           Write Type: UPDATE
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: update srcpart_acidb set value = concat(value, 'updated') where cast(key as integer) in(413,43) and hr='11'
 PREHOOK: type: QUERY
@@ -845,7 +848,8 @@ STAGE PLANS:
           Write Type: DELETE
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: delete from srcpart_acidb where key in( '1001', '213', '43')
 PREHOOK: type: QUERY

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/llap/acid_vectorization_original.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/acid_vectorization_original.q.out b/ql/src/test/results/clientpositive/llap/acid_vectorization_original.q.out
index ab74d0d..1a2aff7 100644
--- a/ql/src/test/results/clientpositive/llap/acid_vectorization_original.q.out
+++ b/ql/src/test/results/clientpositive/llap/acid_vectorization_original.q.out
@@ -618,7 +618,8 @@ STAGE PLANS:
           Write Type: UPDATE
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: update over10k_orc_bucketed set i = 0 where b = 4294967363 and t < 100
 PREHOOK: type: QUERY

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/llap/autoColumnStats_1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/autoColumnStats_1.q.out b/ql/src/test/results/clientpositive/llap/autoColumnStats_1.q.out
index 9fbc404..f93f666 100644
--- a/ql/src/test/results/clientpositive/llap/autoColumnStats_1.q.out
+++ b/ql/src/test/results/clientpositive/llap/autoColumnStats_1.q.out
@@ -374,6 +374,44 @@ POSTHOOK: query: create table if not exists nzhang_part14 (key string)
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@nzhang_part14
+PREHOOK: query: desc formatted nzhang_part14
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@nzhang_part14
+POSTHOOK: query: desc formatted nzhang_part14
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@nzhang_part14
+# col_name            	data_type           	comment             
+key                 	string              	                    
+	 	 
+# Partition Information	 	 
+# col_name            	data_type           	comment             
+value               	string              	                    
+	 	 
+# Detailed Table Information	 	 
+Database:           	default             	 
+#### A masked pattern was here ####
+Retention:          	0                   	 
+#### A masked pattern was here ####
+Table Type:         	MANAGED_TABLE       	 
+Table Parameters:	 	 
+	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\"}
+	numFiles            	0                   
+	numPartitions       	0                   
+	numRows             	0                   
+	rawDataSize         	0                   
+	totalSize           	0                   
+#### A masked pattern was here ####
+	 	 
+# Storage Information	 	 
+SerDe Library:      	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe	 
+InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
+OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
+Compressed:         	No                  	 
+Num Buckets:        	-1                  	 
+Bucket Columns:     	[]                  	 
+Sort Columns:       	[]                  	 
+Storage Desc Params:	 	 
+	serialization.format	1                   
 PREHOOK: query: insert overwrite table nzhang_part14 partition(value) 
 select key, value from (
   select * from (select 'k1' as key, cast(null as string) as value from src limit 2)a 
@@ -399,6 +437,42 @@ POSTHOOK: Output: default@nzhang_part14@value=
 POSTHOOK: Output: default@nzhang_part14@value=__HIVE_DEFAULT_PARTITION__
 POSTHOOK: Lineage: nzhang_part14 PARTITION(value= ).key EXPRESSION []
 POSTHOOK: Lineage: nzhang_part14 PARTITION(value=__HIVE_DEFAULT_PARTITION__).key EXPRESSION []
+PREHOOK: query: desc formatted nzhang_part14 partition (value=' ')
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@nzhang_part14
+POSTHOOK: query: desc formatted nzhang_part14 partition (value=' ')
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@nzhang_part14
+# col_name            	data_type           	comment             
+key                 	string              	                    
+	 	 
+# Partition Information	 	 
+# col_name            	data_type           	comment             
+value               	string              	                    
+	 	 
+# Detailed Partition Information	 	 
+Partition Value:    	[ ]                 	 
+Database:           	default             	 
+Table:              	nzhang_part14       	 
+#### A masked pattern was here ####
+Partition Parameters:	 	 
+	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\"}}
+	numFiles            	1                   
+	numRows             	2                   
+	rawDataSize         	4                   
+	totalSize           	6                   
+#### A masked pattern was here ####
+	 	 
+# Storage Information	 	 
+SerDe Library:      	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe	 
+InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
+OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
+Compressed:         	No                  	 
+Num Buckets:        	-1                  	 
+Bucket Columns:     	[]                  	 
+Sort Columns:       	[]                  	 
+Storage Desc Params:	 	 
+	serialization.format	1                   
 PREHOOK: query: explain select key from nzhang_part14
 PREHOOK: type: QUERY
 POSTHOOK: query: explain select key from nzhang_part14

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/llap/autoColumnStats_10.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/autoColumnStats_10.q.out b/ql/src/test/results/clientpositive/llap/autoColumnStats_10.q.out
new file mode 100644
index 0000000..6cb51fd
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/autoColumnStats_10.q.out
@@ -0,0 +1,516 @@
+PREHOOK: query: drop table p
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: drop table p
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: CREATE TABLE p(insert_num int, c1 tinyint, c2 smallint)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@p
+POSTHOOK: query: CREATE TABLE p(insert_num int, c1 tinyint, c2 smallint)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@p
+PREHOOK: query: desc formatted p
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@p
+POSTHOOK: query: desc formatted p
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@p
+# col_name            	data_type           	comment             
+insert_num          	int                 	                    
+c1                  	tinyint             	                    
+c2                  	smallint            	                    
+	 	 
+# Detailed Table Information	 	 
+Database:           	default             	 
+#### A masked pattern was here ####
+Retention:          	0                   	 
+#### A masked pattern was here ####
+Table Type:         	MANAGED_TABLE       	 
+Table Parameters:	 	 
+	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"c1\":\"true\",\"c2\":\"true\",\"insert_num\":\"true\"}}
+	numFiles            	0                   
+	numRows             	0                   
+	rawDataSize         	0                   
+	totalSize           	0                   
+#### A masked pattern was here ####
+	 	 
+# Storage Information	 	 
+SerDe Library:      	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe	 
+InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
+OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
+Compressed:         	No                  	 
+Num Buckets:        	-1                  	 
+Bucket Columns:     	[]                  	 
+Sort Columns:       	[]                  	 
+Storage Desc Params:	 	 
+	serialization.format	1                   
+PREHOOK: query: insert into p values (1,22,333)
+PREHOOK: type: QUERY
+PREHOOK: Output: default@p
+POSTHOOK: query: insert into p values (1,22,333)
+POSTHOOK: type: QUERY
+POSTHOOK: Output: default@p
+POSTHOOK: Lineage: p.c1 EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col2, type:string, comment:), ]
+POSTHOOK: Lineage: p.c2 EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col3, type:string, comment:), ]
+POSTHOOK: Lineage: p.insert_num EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
+PREHOOK: query: desc formatted p
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@p
+POSTHOOK: query: desc formatted p
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@p
+# col_name            	data_type           	comment             
+insert_num          	int                 	                    
+c1                  	tinyint             	                    
+c2                  	smallint            	                    
+	 	 
+# Detailed Table Information	 	 
+Database:           	default             	 
+#### A masked pattern was here ####
+Retention:          	0                   	 
+#### A masked pattern was here ####
+Table Type:         	MANAGED_TABLE       	 
+Table Parameters:	 	 
+	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"c1\":\"true\",\"c2\":\"true\",\"insert_num\":\"true\"}}
+	numFiles            	1                   
+	numRows             	1                   
+	rawDataSize         	8                   
+	totalSize           	9                   
+#### A masked pattern was here ####
+	 	 
+# Storage Information	 	 
+SerDe Library:      	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe	 
+InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
+OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
+Compressed:         	No                  	 
+Num Buckets:        	-1                  	 
+Bucket Columns:     	[]                  	 
+Sort Columns:       	[]                  	 
+Storage Desc Params:	 	 
+	serialization.format	1                   
+PREHOOK: query: alter table p replace columns (insert_num int, c1 STRING, c2 STRING)
+PREHOOK: type: ALTERTABLE_REPLACECOLS
+PREHOOK: Input: default@p
+PREHOOK: Output: default@p
+POSTHOOK: query: alter table p replace columns (insert_num int, c1 STRING, c2 STRING)
+POSTHOOK: type: ALTERTABLE_REPLACECOLS
+POSTHOOK: Input: default@p
+POSTHOOK: Output: default@p
+PREHOOK: query: desc formatted p
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@p
+POSTHOOK: query: desc formatted p
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@p
+# col_name            	data_type           	comment             
+insert_num          	int                 	                    
+c1                  	string              	                    
+c2                  	string              	                    
+	 	 
+# Detailed Table Information	 	 
+Database:           	default             	 
+#### A masked pattern was here ####
+Retention:          	0                   	 
+#### A masked pattern was here ####
+Table Type:         	MANAGED_TABLE       	 
+Table Parameters:	 	 
+	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"insert_num\":\"true\"}}
+#### A masked pattern was here ####
+	numFiles            	1                   
+	numRows             	1                   
+	rawDataSize         	8                   
+	totalSize           	9                   
+#### A masked pattern was here ####
+	 	 
+# Storage Information	 	 
+SerDe Library:      	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe	 
+InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
+OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
+Compressed:         	No                  	 
+Num Buckets:        	-1                  	 
+Bucket Columns:     	[]                  	 
+Sort Columns:       	[]                  	 
+Storage Desc Params:	 	 
+	serialization.format	1                   
+PREHOOK: query: desc formatted p insert_num
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@p
+POSTHOOK: query: desc formatted p insert_num
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@p
+col_name            	insert_num          	 	 	 	 	 	 	 	 	 	 
+data_type           	int                 	 	 	 	 	 	 	 	 	 	 
+min                 	1                   	 	 	 	 	 	 	 	 	 	 
+max                 	1                   	 	 	 	 	 	 	 	 	 	 
+num_nulls           	0                   	 	 	 	 	 	 	 	 	 	 
+distinct_count      	1                   	 	 	 	 	 	 	 	 	 	 
+avg_col_len         	                    	 	 	 	 	 	 	 	 	 	 
+max_col_len         	                    	 	 	 	 	 	 	 	 	 	 
+num_trues           	                    	 	 	 	 	 	 	 	 	 	 
+num_falses          	                    	 	 	 	 	 	 	 	 	 	 
+bitVector           	HL                  	 	 	 	 	 	 	 	 	 	 
+comment             	from deserializer   	 	 	 	 	 	 	 	 	 	 
+COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"insert_num\":\"true\"}}	 	 	 	 	 	 	 	 	 	 
+PREHOOK: query: desc formatted p c1
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@p
+POSTHOOK: query: desc formatted p c1
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@p
+col_name            	c1                  	 	 	 	 	 	 	 	 	 	 
+data_type           	string              	 	 	 	 	 	 	 	 	 	 
+min                 	                    	 	 	 	 	 	 	 	 	 	 
+max                 	                    	 	 	 	 	 	 	 	 	 	 
+num_nulls           	                    	 	 	 	 	 	 	 	 	 	 
+distinct_count      	                    	 	 	 	 	 	 	 	 	 	 
+avg_col_len         	                    	 	 	 	 	 	 	 	 	 	 
+max_col_len         	                    	 	 	 	 	 	 	 	 	 	 
+num_trues           	                    	 	 	 	 	 	 	 	 	 	 
+num_falses          	                    	 	 	 	 	 	 	 	 	 	 
+bitVector           	                    	 	 	 	 	 	 	 	 	 	 
+comment             	from deserializer   	 	 	 	 	 	 	 	 	 	 
+COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"insert_num\":\"true\"}}	 	 	 	 	 	 	 	 	 	 
+PREHOOK: query: insert into p values (2,11,111)
+PREHOOK: type: QUERY
+PREHOOK: Output: default@p
+POSTHOOK: query: insert into p values (2,11,111)
+POSTHOOK: type: QUERY
+POSTHOOK: Output: default@p
+POSTHOOK: Lineage: p.c1 SIMPLE [(values__tmp__table__2)values__tmp__table__2.FieldSchema(name:tmp_values_col2, type:string, comment:), ]
+POSTHOOK: Lineage: p.c2 SIMPLE [(values__tmp__table__2)values__tmp__table__2.FieldSchema(name:tmp_values_col3, type:string, comment:), ]
+POSTHOOK: Lineage: p.insert_num EXPRESSION [(values__tmp__table__2)values__tmp__table__2.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
+PREHOOK: query: desc formatted p
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@p
+POSTHOOK: query: desc formatted p
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@p
+# col_name            	data_type           	comment             
+insert_num          	int                 	                    
+c1                  	string              	                    
+c2                  	string              	                    
+	 	 
+# Detailed Table Information	 	 
+Database:           	default             	 
+#### A masked pattern was here ####
+Retention:          	0                   	 
+#### A masked pattern was here ####
+Table Type:         	MANAGED_TABLE       	 
+Table Parameters:	 	 
+	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"insert_num\":\"true\"}}
+#### A masked pattern was here ####
+	numFiles            	2                   
+	numRows             	2                   
+	rawDataSize         	16                  
+	totalSize           	18                  
+#### A masked pattern was here ####
+	 	 
+# Storage Information	 	 
+SerDe Library:      	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe	 
+InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
+OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
+Compressed:         	No                  	 
+Num Buckets:        	-1                  	 
+Bucket Columns:     	[]                  	 
+Sort Columns:       	[]                  	 
+Storage Desc Params:	 	 
+	serialization.format	1                   
+PREHOOK: query: desc formatted p insert_num
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@p
+POSTHOOK: query: desc formatted p insert_num
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@p
+col_name            	insert_num          	 	 	 	 	 	 	 	 	 	 
+data_type           	int                 	 	 	 	 	 	 	 	 	 	 
+min                 	1                   	 	 	 	 	 	 	 	 	 	 
+max                 	2                   	 	 	 	 	 	 	 	 	 	 
+num_nulls           	0                   	 	 	 	 	 	 	 	 	 	 
+distinct_count      	2                   	 	 	 	 	 	 	 	 	 	 
+avg_col_len         	                    	 	 	 	 	 	 	 	 	 	 
+max_col_len         	                    	 	 	 	 	 	 	 	 	 	 
+num_trues           	                    	 	 	 	 	 	 	 	 	 	 
+num_falses          	                    	 	 	 	 	 	 	 	 	 	 
+bitVector           	HL                  	 	 	 	 	 	 	 	 	 	 
+comment             	from deserializer   	 	 	 	 	 	 	 	 	 	 
+COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"insert_num\":\"true\"}}	 	 	 	 	 	 	 	 	 	 
+PREHOOK: query: desc formatted p c1
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@p
+POSTHOOK: query: desc formatted p c1
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@p
+col_name            	c1                  	 	 	 	 	 	 	 	 	 	 
+data_type           	string              	 	 	 	 	 	 	 	 	 	 
+min                 	                    	 	 	 	 	 	 	 	 	 	 
+max                 	                    	 	 	 	 	 	 	 	 	 	 
+num_nulls           	                    	 	 	 	 	 	 	 	 	 	 
+distinct_count      	                    	 	 	 	 	 	 	 	 	 	 
+avg_col_len         	                    	 	 	 	 	 	 	 	 	 	 
+max_col_len         	                    	 	 	 	 	 	 	 	 	 	 
+num_trues           	                    	 	 	 	 	 	 	 	 	 	 
+num_falses          	                    	 	 	 	 	 	 	 	 	 	 
+bitVector           	                    	 	 	 	 	 	 	 	 	 	 
+comment             	from deserializer   	 	 	 	 	 	 	 	 	 	 
+COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"insert_num\":\"true\"}}	 	 	 	 	 	 	 	 	 	 
+PREHOOK: query: drop table p
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@p
+PREHOOK: Output: default@p
+POSTHOOK: query: drop table p
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@p
+POSTHOOK: Output: default@p
+PREHOOK: query: CREATE TABLE p(insert_num int, c1 tinyint, c2 smallint)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@p
+POSTHOOK: query: CREATE TABLE p(insert_num int, c1 tinyint, c2 smallint)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@p
+PREHOOK: query: desc formatted p
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@p
+POSTHOOK: query: desc formatted p
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@p
+# col_name            	data_type           	comment             
+insert_num          	int                 	                    
+c1                  	tinyint             	                    
+c2                  	smallint            	                    
+	 	 
+# Detailed Table Information	 	 
+Database:           	default             	 
+#### A masked pattern was here ####
+Retention:          	0                   	 
+#### A masked pattern was here ####
+Table Type:         	MANAGED_TABLE       	 
+Table Parameters:	 	 
+	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"c1\":\"true\",\"c2\":\"true\",\"insert_num\":\"true\"}}
+	numFiles            	0                   
+	numRows             	0                   
+	rawDataSize         	0                   
+	totalSize           	0                   
+#### A masked pattern was here ####
+	 	 
+# Storage Information	 	 
+SerDe Library:      	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe	 
+InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
+OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
+Compressed:         	No                  	 
+Num Buckets:        	-1                  	 
+Bucket Columns:     	[]                  	 
+Sort Columns:       	[]                  	 
+Storage Desc Params:	 	 
+	serialization.format	1                   
+PREHOOK: query: insert into p values (1,22,333)
+PREHOOK: type: QUERY
+PREHOOK: Output: default@p
+POSTHOOK: query: insert into p values (1,22,333)
+POSTHOOK: type: QUERY
+POSTHOOK: Output: default@p
+POSTHOOK: Lineage: p.c1 EXPRESSION [(values__tmp__table__3)values__tmp__table__3.FieldSchema(name:tmp_values_col2, type:string, comment:), ]
+POSTHOOK: Lineage: p.c2 EXPRESSION [(values__tmp__table__3)values__tmp__table__3.FieldSchema(name:tmp_values_col3, type:string, comment:), ]
+POSTHOOK: Lineage: p.insert_num EXPRESSION [(values__tmp__table__3)values__tmp__table__3.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
+PREHOOK: query: desc formatted p
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@p
+POSTHOOK: query: desc formatted p
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@p
+# col_name            	data_type           	comment             
+insert_num          	int                 	                    
+c1                  	tinyint             	                    
+c2                  	smallint            	                    
+	 	 
+# Detailed Table Information	 	 
+Database:           	default             	 
+#### A masked pattern was here ####
+Retention:          	0                   	 
+#### A masked pattern was here ####
+Table Type:         	MANAGED_TABLE       	 
+Table Parameters:	 	 
+	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\"}
+	numFiles            	1                   
+	numRows             	1                   
+	rawDataSize         	8                   
+	totalSize           	9                   
+#### A masked pattern was here ####
+	 	 
+# Storage Information	 	 
+SerDe Library:      	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe	 
+InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
+OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
+Compressed:         	No                  	 
+Num Buckets:        	-1                  	 
+Bucket Columns:     	[]                  	 
+Sort Columns:       	[]                  	 
+Storage Desc Params:	 	 
+	serialization.format	1                   
+PREHOOK: query: alter table p replace columns (insert_num int, c1 STRING, c2 STRING)
+PREHOOK: type: ALTERTABLE_REPLACECOLS
+PREHOOK: Input: default@p
+PREHOOK: Output: default@p
+POSTHOOK: query: alter table p replace columns (insert_num int, c1 STRING, c2 STRING)
+POSTHOOK: type: ALTERTABLE_REPLACECOLS
+POSTHOOK: Input: default@p
+POSTHOOK: Output: default@p
+PREHOOK: query: desc formatted p
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@p
+POSTHOOK: query: desc formatted p
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@p
+# col_name            	data_type           	comment             
+insert_num          	int                 	                    
+c1                  	string              	                    
+c2                  	string              	                    
+	 	 
+# Detailed Table Information	 	 
+Database:           	default             	 
+#### A masked pattern was here ####
+Retention:          	0                   	 
+#### A masked pattern was here ####
+Table Type:         	MANAGED_TABLE       	 
+Table Parameters:	 	 
+	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\"}
+#### A masked pattern was here ####
+	numFiles            	1                   
+	numRows             	1                   
+	rawDataSize         	8                   
+	totalSize           	9                   
+#### A masked pattern was here ####
+	 	 
+# Storage Information	 	 
+SerDe Library:      	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe	 
+InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
+OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
+Compressed:         	No                  	 
+Num Buckets:        	-1                  	 
+Bucket Columns:     	[]                  	 
+Sort Columns:       	[]                  	 
+Storage Desc Params:	 	 
+	serialization.format	1                   
+PREHOOK: query: desc formatted p insert_num
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@p
+POSTHOOK: query: desc formatted p insert_num
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@p
+col_name            	insert_num          	 	 	 	 	 	 	 	 	 	 
+data_type           	int                 	 	 	 	 	 	 	 	 	 	 
+min                 	                    	 	 	 	 	 	 	 	 	 	 
+max                 	                    	 	 	 	 	 	 	 	 	 	 
+num_nulls           	                    	 	 	 	 	 	 	 	 	 	 
+distinct_count      	                    	 	 	 	 	 	 	 	 	 	 
+avg_col_len         	                    	 	 	 	 	 	 	 	 	 	 
+max_col_len         	                    	 	 	 	 	 	 	 	 	 	 
+num_trues           	                    	 	 	 	 	 	 	 	 	 	 
+num_falses          	                    	 	 	 	 	 	 	 	 	 	 
+bitVector           	                    	 	 	 	 	 	 	 	 	 	 
+comment             	from deserializer   	 	 	 	 	 	 	 	 	 	 
+COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\"}	 	 	 	 	 	 	 	 	 	 
+PREHOOK: query: desc formatted p c1
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@p
+POSTHOOK: query: desc formatted p c1
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@p
+col_name            	c1                  	 	 	 	 	 	 	 	 	 	 
+data_type           	string              	 	 	 	 	 	 	 	 	 	 
+min                 	                    	 	 	 	 	 	 	 	 	 	 
+max                 	                    	 	 	 	 	 	 	 	 	 	 
+num_nulls           	                    	 	 	 	 	 	 	 	 	 	 
+distinct_count      	                    	 	 	 	 	 	 	 	 	 	 
+avg_col_len         	                    	 	 	 	 	 	 	 	 	 	 
+max_col_len         	                    	 	 	 	 	 	 	 	 	 	 
+num_trues           	                    	 	 	 	 	 	 	 	 	 	 
+num_falses          	                    	 	 	 	 	 	 	 	 	 	 
+bitVector           	                    	 	 	 	 	 	 	 	 	 	 
+comment             	from deserializer   	 	 	 	 	 	 	 	 	 	 
+COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\"}	 	 	 	 	 	 	 	 	 	 
+PREHOOK: query: insert into p values (2,11,111)
+PREHOOK: type: QUERY
+PREHOOK: Output: default@p
+POSTHOOK: query: insert into p values (2,11,111)
+POSTHOOK: type: QUERY
+POSTHOOK: Output: default@p
+POSTHOOK: Lineage: p.c1 SIMPLE [(values__tmp__table__4)values__tmp__table__4.FieldSchema(name:tmp_values_col2, type:string, comment:), ]
+POSTHOOK: Lineage: p.c2 SIMPLE [(values__tmp__table__4)values__tmp__table__4.FieldSchema(name:tmp_values_col3, type:string, comment:), ]
+POSTHOOK: Lineage: p.insert_num EXPRESSION [(values__tmp__table__4)values__tmp__table__4.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
+PREHOOK: query: desc formatted p
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@p
+POSTHOOK: query: desc formatted p
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@p
+# col_name            	data_type           	comment             
+insert_num          	int                 	                    
+c1                  	string              	                    
+c2                  	string              	                    
+	 	 
+# Detailed Table Information	 	 
+Database:           	default             	 
+#### A masked pattern was here ####
+Retention:          	0                   	 
+#### A masked pattern was here ####
+Table Type:         	MANAGED_TABLE       	 
+Table Parameters:	 	 
+	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\"}
+#### A masked pattern was here ####
+	numFiles            	2                   
+	numRows             	2                   
+	rawDataSize         	16                  
+	totalSize           	18                  
+#### A masked pattern was here ####
+	 	 
+# Storage Information	 	 
+SerDe Library:      	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe	 
+InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
+OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
+Compressed:         	No                  	 
+Num Buckets:        	-1                  	 
+Bucket Columns:     	[]                  	 
+Sort Columns:       	[]                  	 
+Storage Desc Params:	 	 
+	serialization.format	1                   
+PREHOOK: query: desc formatted p insert_num
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@p
+POSTHOOK: query: desc formatted p insert_num
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@p
+col_name            	insert_num          	 	 	 	 	 	 	 	 	 	 
+data_type           	int                 	 	 	 	 	 	 	 	 	 	 
+min                 	                    	 	 	 	 	 	 	 	 	 	 
+max                 	                    	 	 	 	 	 	 	 	 	 	 
+num_nulls           	                    	 	 	 	 	 	 	 	 	 	 
+distinct_count      	                    	 	 	 	 	 	 	 	 	 	 
+avg_col_len         	                    	 	 	 	 	 	 	 	 	 	 
+max_col_len         	                    	 	 	 	 	 	 	 	 	 	 
+num_trues           	                    	 	 	 	 	 	 	 	 	 	 
+num_falses          	                    	 	 	 	 	 	 	 	 	 	 
+bitVector           	                    	 	 	 	 	 	 	 	 	 	 
+comment             	from deserializer   	 	 	 	 	 	 	 	 	 	 
+COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\"}	 	 	 	 	 	 	 	 	 	 
+PREHOOK: query: desc formatted p c1
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@p
+POSTHOOK: query: desc formatted p c1
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@p
+col_name            	c1                  	 	 	 	 	 	 	 	 	 	 
+data_type           	string              	 	 	 	 	 	 	 	 	 	 
+min                 	                    	 	 	 	 	 	 	 	 	 	 
+max                 	                    	 	 	 	 	 	 	 	 	 	 
+num_nulls           	                    	 	 	 	 	 	 	 	 	 	 
+distinct_count      	                    	 	 	 	 	 	 	 	 	 	 
+avg_col_len         	                    	 	 	 	 	 	 	 	 	 	 
+max_col_len         	                    	 	 	 	 	 	 	 	 	 	 
+num_trues           	                    	 	 	 	 	 	 	 	 	 	 
+num_falses          	                    	 	 	 	 	 	 	 	 	 	 
+bitVector           	                    	 	 	 	 	 	 	 	 	 	 
+comment             	from deserializer   	 	 	 	 	 	 	 	 	 	 
+COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\"}	 	 	 	 	 	 	 	 	 	 

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/llap/autoColumnStats_2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/autoColumnStats_2.q.out b/ql/src/test/results/clientpositive/llap/autoColumnStats_2.q.out
index 104c25f..0cc0c05 100644
--- a/ql/src/test/results/clientpositive/llap/autoColumnStats_2.q.out
+++ b/ql/src/test/results/clientpositive/llap/autoColumnStats_2.q.out
@@ -747,7 +747,6 @@ Database:           	default
 Table:              	alter5              	 
 #### A masked pattern was here ####
 Partition Parameters:	 	 
-	COLUMN_STATS_ACCURATE	{\"COLUMN_STATS\":{\"col1\":\"true\"}}
 	numFiles            	1                   
 	totalSize           	1906                
 #### A masked pattern was here ####

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/llap/auto_join1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/auto_join1.q.out b/ql/src/test/results/clientpositive/llap/auto_join1.q.out
index 5329f84..bbe63e2 100644
--- a/ql/src/test/results/clientpositive/llap/auto_join1.q.out
+++ b/ql/src/test/results/clientpositive/llap/auto_join1.q.out
@@ -98,7 +98,8 @@ STAGE PLANS:
               name: default.dest_j1
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM src src1 JOIN src src2 ON (src1.key = src2.key)
 INSERT OVERWRITE TABLE dest_j1 SELECT src1.key, src2.value

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/llap/auto_smb_mapjoin_14.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/auto_smb_mapjoin_14.q.out b/ql/src/test/results/clientpositive/llap/auto_smb_mapjoin_14.q.out
index c6b14ab..80a74fb 100644
--- a/ql/src/test/results/clientpositive/llap/auto_smb_mapjoin_14.q.out
+++ b/ql/src/test/results/clientpositive/llap/auto_smb_mapjoin_14.q.out
@@ -1612,7 +1612,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-4
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-1
     Move Operator
@@ -1625,7 +1626,8 @@ STAGE PLANS:
               name: default.dest2
 
   Stage: Stage-5
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: from (
   select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key
@@ -1850,7 +1852,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-4
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-1
     Move Operator
@@ -1863,7 +1866,8 @@ STAGE PLANS:
               name: default.dest2
 
   Stage: Stage-5
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: from (
   select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/llap/auto_sortmerge_join_13.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/auto_sortmerge_join_13.q.out b/ql/src/test/results/clientpositive/llap/auto_sortmerge_join_13.q.out
index f358a59..a6d7309 100644
--- a/ql/src/test/results/clientpositive/llap/auto_sortmerge_join_13.q.out
+++ b/ql/src/test/results/clientpositive/llap/auto_sortmerge_join_13.q.out
@@ -165,7 +165,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-4
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-1
     Move Operator
@@ -178,7 +179,8 @@ STAGE PLANS:
               name: default.dest2
 
   Stage: Stage-5
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: from (
   SELECT a.key key1, a.value value1, b.key key2, b.value value2 
@@ -366,7 +368,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-4
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-1
     Move Operator
@@ -379,7 +382,8 @@ STAGE PLANS:
               name: default.dest2
 
   Stage: Stage-5
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: from (
   SELECT a.key key1, a.value value1, b.key key2, b.value value2 
@@ -567,7 +571,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-4
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-1
     Move Operator
@@ -580,7 +585,8 @@ STAGE PLANS:
               name: default.dest2
 
   Stage: Stage-5
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: from (
   SELECT a.key key1, a.value value1, b.key key2, b.value value2 

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/llap/bucket2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/bucket2.q.out b/ql/src/test/results/clientpositive/llap/bucket2.q.out
index e0c92ce..9954c2d 100644
--- a/ql/src/test/results/clientpositive/llap/bucket2.q.out
+++ b/ql/src/test/results/clientpositive/llap/bucket2.q.out
@@ -174,7 +174,8 @@ STAGE PLANS:
               name: default.bucket2_1
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
 PREHOOK: query: insert overwrite table bucket2_1

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/llap/bucket3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/bucket3.q.out b/ql/src/test/results/clientpositive/llap/bucket3.q.out
index 8e6d85c..218f9b7 100644
--- a/ql/src/test/results/clientpositive/llap/bucket3.q.out
+++ b/ql/src/test/results/clientpositive/llap/bucket3.q.out
@@ -171,7 +171,8 @@ STAGE PLANS:
               name: default.bucket3_1
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
 PREHOOK: query: insert overwrite table bucket3_1 partition (ds='1')

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/llap/bucket4.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/bucket4.q.out b/ql/src/test/results/clientpositive/llap/bucket4.q.out
index 5fbffc9..2115565 100644
--- a/ql/src/test/results/clientpositive/llap/bucket4.q.out
+++ b/ql/src/test/results/clientpositive/llap/bucket4.q.out
@@ -177,7 +177,8 @@ STAGE PLANS:
               name: default.bucket4_1
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
 PREHOOK: query: insert overwrite table bucket4_1

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/llap/bucket5.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/bucket5.q.out b/ql/src/test/results/clientpositive/llap/bucket5.q.out
index 2357623..680dbd9 100644
--- a/ql/src/test/results/clientpositive/llap/bucket5.q.out
+++ b/ql/src/test/results/clientpositive/llap/bucket5.q.out
@@ -248,7 +248,8 @@ STAGE PLANS:
               name: default.bucketed_table
 
   Stage: Stage-4
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
   Stage: Stage-1
@@ -280,7 +281,8 @@ STAGE PLANS:
               name: default.unbucketed_table
 
   Stage: Stage-5
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
   Stage: Stage-10

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/llap/bucket6.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/bucket6.q.out b/ql/src/test/results/clientpositive/llap/bucket6.q.out
index 20895f8..42f062b 100644
--- a/ql/src/test/results/clientpositive/llap/bucket6.q.out
+++ b/ql/src/test/results/clientpositive/llap/bucket6.q.out
@@ -73,7 +73,8 @@ STAGE PLANS:
               name: default.src_bucket
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: insert into table src_bucket select key,value from srcpart
 PREHOOK: type: QUERY

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/llap/bucket_many.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/bucket_many.q.out b/ql/src/test/results/clientpositive/llap/bucket_many.q.out
index b78cbaa..4f3bee2 100644
--- a/ql/src/test/results/clientpositive/llap/bucket_many.q.out
+++ b/ql/src/test/results/clientpositive/llap/bucket_many.q.out
@@ -174,7 +174,8 @@ STAGE PLANS:
               name: default.bucket_many
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
 PREHOOK: query: insert overwrite table bucket_many

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/llap/bucketmapjoin1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/bucketmapjoin1.q.out b/ql/src/test/results/clientpositive/llap/bucketmapjoin1.q.out
index c6ab95c..fa6a2d0 100644
--- a/ql/src/test/results/clientpositive/llap/bucketmapjoin1.q.out
+++ b/ql/src/test/results/clientpositive/llap/bucketmapjoin1.q.out
@@ -638,7 +638,8 @@ STAGE PLANS:
               name: default.bucketmapjoin_tmp_result
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
 PREHOOK: query: insert overwrite table bucketmapjoin_tmp_result 
@@ -1004,7 +1005,8 @@ STAGE PLANS:
               name: default.bucketmapjoin_tmp_result
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
 PREHOOK: query: insert overwrite table bucketmapjoin_tmp_result 

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/llap/bucketmapjoin2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/bucketmapjoin2.q.out b/ql/src/test/results/clientpositive/llap/bucketmapjoin2.q.out
index c254d89..57eead0 100644
--- a/ql/src/test/results/clientpositive/llap/bucketmapjoin2.q.out
+++ b/ql/src/test/results/clientpositive/llap/bucketmapjoin2.q.out
@@ -352,7 +352,8 @@ STAGE PLANS:
               name: default.bucketmapjoin_tmp_result
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
 PREHOOK: query: insert overwrite table bucketmapjoin_tmp_result 
@@ -724,7 +725,8 @@ STAGE PLANS:
               name: default.bucketmapjoin_tmp_result
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
 PREHOOK: query: insert overwrite table bucketmapjoin_tmp_result 
@@ -1162,7 +1164,8 @@ STAGE PLANS:
               name: default.bucketmapjoin_tmp_result
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
 PREHOOK: query: insert overwrite table bucketmapjoin_tmp_result

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/llap/bucketmapjoin3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/bucketmapjoin3.q.out b/ql/src/test/results/clientpositive/llap/bucketmapjoin3.q.out
index 1678135..184e890 100644
--- a/ql/src/test/results/clientpositive/llap/bucketmapjoin3.q.out
+++ b/ql/src/test/results/clientpositive/llap/bucketmapjoin3.q.out
@@ -376,7 +376,8 @@ STAGE PLANS:
               name: default.bucketmapjoin_tmp_result
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
 PREHOOK: query: insert overwrite table bucketmapjoin_tmp_result 
@@ -748,7 +749,8 @@ STAGE PLANS:
               name: default.bucketmapjoin_tmp_result
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
 PREHOOK: query: insert overwrite table bucketmapjoin_tmp_result 

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/llap/bucketmapjoin4.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/bucketmapjoin4.q.out b/ql/src/test/results/clientpositive/llap/bucketmapjoin4.q.out
index b871c93..b353073 100644
--- a/ql/src/test/results/clientpositive/llap/bucketmapjoin4.q.out
+++ b/ql/src/test/results/clientpositive/llap/bucketmapjoin4.q.out
@@ -372,7 +372,8 @@ STAGE PLANS:
               name: default.bucketmapjoin_tmp_result
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
 PREHOOK: query: insert overwrite table bucketmapjoin_tmp_result
@@ -728,7 +729,8 @@ STAGE PLANS:
               name: default.bucketmapjoin_tmp_result
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
 PREHOOK: query: insert overwrite table bucketmapjoin_tmp_result

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/llap/bucketsortoptimize_insert_2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/bucketsortoptimize_insert_2.q.out b/ql/src/test/results/clientpositive/llap/bucketsortoptimize_insert_2.q.out
index 13eecea..b907c2d 100644
--- a/ql/src/test/results/clientpositive/llap/bucketsortoptimize_insert_2.q.out
+++ b/ql/src/test/results/clientpositive/llap/bucketsortoptimize_insert_2.q.out
@@ -189,7 +189,8 @@ STAGE PLANS:
               name: default.test_table3
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1')
 SELECT a.key, concat(a.value, b.value) 
@@ -368,7 +369,8 @@ STAGE PLANS:
               name: default.test_table3
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1')
 SELECT a.key, concat(a.value, b.value) 
@@ -571,7 +573,8 @@ STAGE PLANS:
               name: default.test_table3
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1')
 SELECT a.key, concat(a.value, b.value) 
@@ -780,7 +783,8 @@ STAGE PLANS:
               name: default.test_table3
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1')
 SELECT a.key, concat(a.value, b.value) 
@@ -971,7 +975,8 @@ STAGE PLANS:
               name: default.test_table3
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1')
 SELECT a.key, concat(a.v1, b.v2) 
@@ -1162,7 +1167,8 @@ STAGE PLANS:
               name: default.test_table3
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1')
 SELECT a.key+a.key, concat(a.value, b.value) 

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/llap/bucketsortoptimize_insert_6.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/bucketsortoptimize_insert_6.q.out b/ql/src/test/results/clientpositive/llap/bucketsortoptimize_insert_6.q.out
index 9959ba4..f5f5f91 100644
--- a/ql/src/test/results/clientpositive/llap/bucketsortoptimize_insert_6.q.out
+++ b/ql/src/test/results/clientpositive/llap/bucketsortoptimize_insert_6.q.out
@@ -167,7 +167,8 @@ STAGE PLANS:
               name: default.test_table3
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1')
 SELECT a.key, a.key2, concat(a.value, b.value) 
@@ -353,7 +354,8 @@ STAGE PLANS:
               name: default.test_table3
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1')
 SELECT subq1.key, subq1.key2, subq1.value from
@@ -539,7 +541,8 @@ STAGE PLANS:
               name: default.test_table3
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: EXPLAIN
 INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1')
@@ -660,7 +663,8 @@ STAGE PLANS:
               name: default.test_table3
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: EXPLAIN
 INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1')
@@ -787,7 +791,8 @@ STAGE PLANS:
               name: default.test_table3
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1')
 SELECT subq2.key, subq2.key2, subq2.value from
@@ -991,7 +996,8 @@ STAGE PLANS:
               name: default.test_table3
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1')
 SELECT subq2.k2, subq2.k1, subq2.value from
@@ -1205,5 +1211,6 @@ STAGE PLANS:
               name: default.test_table4
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/llap/bucketsortoptimize_insert_7.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/bucketsortoptimize_insert_7.q.out b/ql/src/test/results/clientpositive/llap/bucketsortoptimize_insert_7.q.out
index c5e2973..7b38056 100644
--- a/ql/src/test/results/clientpositive/llap/bucketsortoptimize_insert_7.q.out
+++ b/ql/src/test/results/clientpositive/llap/bucketsortoptimize_insert_7.q.out
@@ -167,7 +167,8 @@ STAGE PLANS:
               name: default.test_table3
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1')
 SELECT a.key, concat(a.value, b.value) 
@@ -350,7 +351,8 @@ STAGE PLANS:
               name: default.test_table3
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1')
 SELECT a.key, concat(a.value, b.value) 
@@ -539,7 +541,8 @@ STAGE PLANS:
               name: default.test_table3
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1')
 SELECT a.key, concat(a.value, b.value) 

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/llap/column_table_stats.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/column_table_stats.q.out b/ql/src/test/results/clientpositive/llap/column_table_stats.q.out
index f073720..a6add6a 100644
--- a/ql/src/test/results/clientpositive/llap/column_table_stats.q.out
+++ b/ql/src/test/results/clientpositive/llap/column_table_stats.q.out
@@ -58,7 +58,6 @@ POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-0 is a root stage
   Stage-2 depends on stages: Stage-0
-  Stage-3 depends on stages: Stage-2
 
 STAGE PLANS:
   Stage: Stage-0
@@ -175,11 +174,9 @@ STAGE PLANS:
                   MultiFileSpray: false
 
   Stage: Stage-2
-    Stats-Aggr Operator
-      Stats Aggregation Key Prefix: default.s/
-
-  Stage: Stage-3
-    Column Stats Work
+    Stats Work
+      Basic Stats Work:
+          Stats Aggregation Key Prefix: default.s/
       Column Stats Desc:
           Columns: key, value
           Column Types: string, string
@@ -314,7 +311,6 @@ POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-0 is a root stage
   Stage-2 depends on stages: Stage-0
-  Stage-3 depends on stages: Stage-2
 
 STAGE PLANS:
   Stage: Stage-0
@@ -490,11 +486,9 @@ STAGE PLANS:
                     MultiFileSpray: false
 
   Stage: Stage-2
-    Stats-Aggr Operator
-      Stats Aggregation Key Prefix: default.spart/
-
-  Stage: Stage-3
-    Column Stats Work
+    Stats Work
+      Basic Stats Work:
+          Stats Aggregation Key Prefix: default.spart/
       Column Stats Desc:
           Columns: key, value
           Column Types: string, string
@@ -723,7 +717,6 @@ POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-0 is a root stage
   Stage-2 depends on stages: Stage-0
-  Stage-3 depends on stages: Stage-2
 
 STAGE PLANS:
   Stage: Stage-0
@@ -899,11 +892,9 @@ STAGE PLANS:
                     MultiFileSpray: false
 
   Stage: Stage-2
-    Stats-Aggr Operator
-      Stats Aggregation Key Prefix: default.spart/
-
-  Stage: Stage-3
-    Column Stats Work
+    Stats Work
+      Basic Stats Work:
+          Stats Aggregation Key Prefix: default.spart/
       Column Stats Desc:
           Columns: key, value
           Column Types: string, string
@@ -1132,7 +1123,6 @@ POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-0 is a root stage
   Stage-2 depends on stages: Stage-0
-  Stage-3 depends on stages: Stage-2
 
 STAGE PLANS:
   Stage: Stage-0
@@ -1260,11 +1250,9 @@ STAGE PLANS:
                     MultiFileSpray: false
 
   Stage: Stage-2
-    Stats-Aggr Operator
-      Stats Aggregation Key Prefix: default.spart/
-
-  Stage: Stage-3
-    Column Stats Work
+    Stats Work
+      Basic Stats Work:
+          Stats Aggregation Key Prefix: default.spart/
       Column Stats Desc:
           Columns: key, value
           Column Types: string, string
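
The column_table_stats hunks show the structural side of the HIVE-16827
merge: Stage-3 ("Column Stats Work") drops out of the stage dependencies, and
the surviving Stage-2 "Stats Work" carries both the basic-stats part,
including its aggregation key prefix, and the column-stats descriptor. A
hedged sketch of the new shape, assuming the scanned table is the default.s
behind the printed prefix:

  ANALYZE TABLE s COMPUTE STATISTICS FOR COLUMNS;
  -- single merged stage:
  --   Stage: Stage-2
  --     Stats Work
  --       Basic Stats Work:
  --           Stats Aggregation Key Prefix: default.s/
  --       Column Stats Desc:
  --           Columns: key, value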

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/llap/column_table_stats_orc.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/column_table_stats_orc.q.out b/ql/src/test/results/clientpositive/llap/column_table_stats_orc.q.out
index 4bc5b39..6e71803 100644
--- a/ql/src/test/results/clientpositive/llap/column_table_stats_orc.q.out
+++ b/ql/src/test/results/clientpositive/llap/column_table_stats_orc.q.out
@@ -59,7 +59,6 @@ POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-0 is a root stage
   Stage-2 depends on stages: Stage-0
-  Stage-3 depends on stages: Stage-2
 
 STAGE PLANS:
   Stage: Stage-0
@@ -178,10 +177,8 @@ STAGE PLANS:
                   MultiFileSpray: false
 
   Stage: Stage-2
-    Stats-Aggr Operator
-
-  Stage: Stage-3
-    Column Stats Work
+    Stats Work
+      Basic Stats NoJob Work:
       Column Stats Desc:
           Columns: key, value
           Column Types: string, string
@@ -311,7 +308,6 @@ POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-0 is a root stage
   Stage-2 depends on stages: Stage-0
-  Stage-3 depends on stages: Stage-2
 
 STAGE PLANS:
   Stage: Stage-0
@@ -481,10 +477,8 @@ STAGE PLANS:
                     MultiFileSpray: false
 
   Stage: Stage-2
-    Stats-Aggr Operator
-
-  Stage: Stage-3
-    Column Stats Work
+    Stats Work
+      Basic Stats NoJob Work:
       Column Stats Desc:
           Columns: key, value
           Column Types: string, string
@@ -708,7 +702,6 @@ POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-0 is a root stage
   Stage-2 depends on stages: Stage-0
-  Stage-3 depends on stages: Stage-2
 
 STAGE PLANS:
   Stage: Stage-0
@@ -833,10 +826,8 @@ STAGE PLANS:
                     MultiFileSpray: false
 
   Stage: Stage-2
-    Stats-Aggr Operator
-
-  Stage: Stage-3
-    Column Stats Work
+    Stats Work
+      Basic Stats NoJob Work:
       Column Stats Desc:
           Columns: key, value
           Column Types: string, string
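
The ORC variant of the same test prints the merged stage as "Basic Stats
NoJob Work:" with no aggregation key prefix. A plausible reading, stated here
as an assumption rather than anything the diff asserts, is that for ORC the
basic stats can be taken from file metadata without running a separate
aggregation job:

  -- observed stage shape for the ORC table in this test
  Stage: Stage-2
    Stats Work
      Basic Stats NoJob Work:
      Column Stats Desc:
          Columns: key, value
          Column Types: string, string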

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/llap/ctas.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/ctas.q.out b/ql/src/test/results/clientpositive/llap/ctas.q.out
index 1dd2dd1..7240da6 100644
--- a/ql/src/test/results/clientpositive/llap/ctas.q.out
+++ b/ql/src/test/results/clientpositive/llap/ctas.q.out
@@ -97,7 +97,8 @@ STAGE PLANS:
           name: default.nzhang_CTAS1
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-0
     Move Operator
@@ -252,7 +253,8 @@ STAGE PLANS:
           name: default.nzhang_ctas2
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-0
     Move Operator
@@ -407,7 +409,8 @@ STAGE PLANS:
           name: default.nzhang_ctas3
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-0
     Move Operator
@@ -626,7 +629,8 @@ STAGE PLANS:
           name: default.nzhang_ctas4
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-0
     Move Operator
@@ -785,7 +789,8 @@ STAGE PLANS:
           name: default.nzhang_ctas5
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-0
     Move Operator

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/llap/deleteAnalyze.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/deleteAnalyze.q.out b/ql/src/test/results/clientpositive/llap/deleteAnalyze.q.out
index 45b9af6..d2552fe 100644
--- a/ql/src/test/results/clientpositive/llap/deleteAnalyze.q.out
+++ b/ql/src/test/results/clientpositive/llap/deleteAnalyze.q.out
@@ -94,6 +94,42 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@testdeci2
 POSTHOOK: Output: default@testdeci2
 #### A masked pattern was here ####
+PREHOOK: query: describe formatted testdeci2
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@testdeci2
+POSTHOOK: query: describe formatted testdeci2
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@testdeci2
+# col_name            	data_type           	comment             
+id                  	int                 	                    
+amount              	decimal(10,3)       	                    
+sales_tax           	decimal(10,3)       	                    
+item                	string              	                    
+	 	 
+# Detailed Table Information	 	 
+Database:           	default             	 
+#### A masked pattern was here ####
+Retention:          	0                   	 
+#### A masked pattern was here ####
+Table Type:         	MANAGED_TABLE       	 
+Table Parameters:	 	 
+	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"amount\":\"true\",\"id\":\"true\",\"item\":\"true\",\"sales_tax\":\"true\"}}
+	numFiles            	0                   
+	numRows             	0                   
+	rawDataSize         	0                   
+	totalSize           	0                   
+#### A masked pattern was here ####
+	 	 
+# Storage Information	 	 
+SerDe Library:      	org.apache.hadoop.hive.ql.io.orc.OrcSerde	 
+InputFormat:        	org.apache.hadoop.hive.ql.io.orc.OrcInputFormat	 
+OutputFormat:       	org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat	 
+Compressed:         	No                  	 
+Num Buckets:        	-1                  	 
+Bucket Columns:     	[]                  	 
+Sort Columns:       	[]                  	 
+Storage Desc Params:	 	 
+	serialization.format	1                   
 PREHOOK: query: analyze table testdeci2 compute statistics for columns
 PREHOOK: type: QUERY
 PREHOOK: Input: default@testdeci2
@@ -129,18 +165,18 @@ Stage-0
     Stage-1
       Reducer 2 llap
       File Output Operator [FS_10]
-        Select Operator [SEL_9] (rows=2 width=228)
+        Select Operator [SEL_9] (rows=1 width=228)
           Output:["_col0","_col1","_col2"]
-          Merge Join Operator [MERGEJOIN_15] (rows=2 width=228)
+          Merge Join Operator [MERGEJOIN_15] (rows=1 width=228)
             Conds:RS_6._col1=RS_7._col3(Inner),Output:["_col0","_col3","_col4"]
           <-Map 1 [SIMPLE_EDGE] llap
             SHUFFLE [RS_6]
               PartitionCols:_col1
-              Select Operator [SEL_2] (rows=2 width=88)
+              Select Operator [SEL_2] (rows=1 width=88)
                 Output:["_col0","_col1"]
-                Filter Operator [FIL_13] (rows=2 width=88)
+                Filter Operator [FIL_13] (rows=1 width=88)
                   predicate:item is not null
-                  TableScan [TS_0] (rows=2 width=88)
+                  TableScan [TS_0] (rows=1 width=88)
                     default@testdeci2,s,Tbl:COMPLETE,Col:COMPLETE,Output:["id","item"]
           <-Map 3 [SIMPLE_EDGE] llap
             SHUFFLE [RS_7]
@@ -149,6 +185,6 @@ Stage-0
                 Output:["_col1","_col2","_col3"]
                 Filter Operator [FIL_14] (rows=1 width=312)
                   predicate:((id = 2) and item is not null)
-                  TableScan [TS_3] (rows=2 width=312)
+                  TableScan [TS_3] (rows=1 width=312)
                     default@testdeci2,d,Tbl:COMPLETE,Col:COMPLETE,Output:["id","amount","sales_tax","item"]
 

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/llap/disable_merge_for_bucketing.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/disable_merge_for_bucketing.q.out b/ql/src/test/results/clientpositive/llap/disable_merge_for_bucketing.q.out
index e31ebaf..acd6076 100644
--- a/ql/src/test/results/clientpositive/llap/disable_merge_for_bucketing.q.out
+++ b/ql/src/test/results/clientpositive/llap/disable_merge_for_bucketing.q.out
@@ -174,7 +174,8 @@ STAGE PLANS:
               name: default.bucket2_1
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
 PREHOOK: query: insert overwrite table bucket2_1

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/llap/dynamic_partition_pruning.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/dynamic_partition_pruning.q.out b/ql/src/test/results/clientpositive/llap/dynamic_partition_pruning.q.out
index 3c3cd74..025c484 100644
--- a/ql/src/test/results/clientpositive/llap/dynamic_partition_pruning.q.out
+++ b/ql/src/test/results/clientpositive/llap/dynamic_partition_pruning.q.out
@@ -108,7 +108,8 @@ STAGE PLANS:
           name: default.srcpart_date
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-0
     Move Operator

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/llap/dynamic_semijoin_reduction_3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/dynamic_semijoin_reduction_3.q.out b/ql/src/test/results/clientpositive/llap/dynamic_semijoin_reduction_3.q.out
index 374d4b8..6772e5d 100644
--- a/ql/src/test/results/clientpositive/llap/dynamic_semijoin_reduction_3.q.out
+++ b/ql/src/test/results/clientpositive/llap/dynamic_semijoin_reduction_3.q.out
@@ -259,7 +259,8 @@ STAGE PLANS:
           Write Type: DELETE
 
   Stage: Stage-6
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-2
     Move Operator
@@ -273,7 +274,8 @@ STAGE PLANS:
           Write Type: UPDATE
 
   Stage: Stage-7
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-3
     Move Operator
@@ -286,7 +288,8 @@ STAGE PLANS:
               name: default.merge_tmp_table
 
   Stage: Stage-8
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-1
     Move Operator
@@ -300,7 +303,8 @@ STAGE PLANS:
           Write Type: INSERT
 
   Stage: Stage-9
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: explain merge into acidTbl as t using nonAcidOrcTbl s ON t.a = s.a
 WHEN NOT MATCHED THEN INSERT VALUES(s.a, s.b)
@@ -433,7 +437,8 @@ STAGE PLANS:
           Write Type: INSERT
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: explain merge into acidTbl as t using (
   select * from nonAcidOrcTbl where a > 0
@@ -734,7 +739,8 @@ STAGE PLANS:
           Write Type: DELETE
 
   Stage: Stage-6
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-2
     Move Operator
@@ -748,7 +754,8 @@ STAGE PLANS:
           Write Type: UPDATE
 
   Stage: Stage-7
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-3
     Move Operator
@@ -761,7 +768,8 @@ STAGE PLANS:
               name: default.merge_tmp_table
 
   Stage: Stage-8
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-1
     Move Operator
@@ -775,7 +783,8 @@ STAGE PLANS:
           Write Type: INSERT
 
   Stage: Stage-9
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: drop database if exists type2_scd_helper cascade
 PREHOOK: type: DROPDATABASE
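
dynamic_semijoin_reduction_3.q.out shows the same rename inside MERGE plans:
the statement compiles into one Move Operator per write type (DELETE, UPDATE,
the merge_tmp_table insert, INSERT), and each Move keeps its own stats stage,
now printed as Stats Work. For the insert-only MERGE quoted earlier in this
file's hunks, the tail is a single such pair:

  EXPLAIN MERGE INTO acidTbl AS t USING nonAcidOrcTbl s ON t.a = s.a
  WHEN NOT MATCHED THEN INSERT VALUES(s.a, s.b);
  -- Move Operator (Write Type: INSERT), then:
  --   Stage: Stage-3
  --     Stats Work
  --       Basic Stats Work: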


http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/bucketsortoptimize_insert_3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/bucketsortoptimize_insert_3.q.out b/ql/src/test/results/clientpositive/bucketsortoptimize_insert_3.q.out
index c5e03be..7eb36bf 100644
--- a/ql/src/test/results/clientpositive/bucketsortoptimize_insert_3.q.out
+++ b/ql/src/test/results/clientpositive/bucketsortoptimize_insert_3.q.out
@@ -78,7 +78,8 @@ STAGE PLANS:
               name: default.test_table2
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '1')
 SELECT x.value, x.key from 
@@ -196,7 +197,8 @@ STAGE PLANS:
               name: default.test_table2
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '1')
 SELECT x.key, x.value from 

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/bucketsortoptimize_insert_4.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/bucketsortoptimize_insert_4.q.out b/ql/src/test/results/clientpositive/bucketsortoptimize_insert_4.q.out
index 1d794c3..7efb7ce 100644
--- a/ql/src/test/results/clientpositive/bucketsortoptimize_insert_4.q.out
+++ b/ql/src/test/results/clientpositive/bucketsortoptimize_insert_4.q.out
@@ -148,7 +148,8 @@ STAGE PLANS:
               name: default.test_table3
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-8
     Map Reduce Local Work
@@ -427,7 +428,8 @@ STAGE PLANS:
               name: default.test_table3
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-8
     Map Reduce Local Work

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/bucketsortoptimize_insert_5.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/bucketsortoptimize_insert_5.q.out b/ql/src/test/results/clientpositive/bucketsortoptimize_insert_5.q.out
index 1e70105..6c10249 100644
--- a/ql/src/test/results/clientpositive/bucketsortoptimize_insert_5.q.out
+++ b/ql/src/test/results/clientpositive/bucketsortoptimize_insert_5.q.out
@@ -156,7 +156,8 @@ STAGE PLANS:
               name: default.test_table3
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-8
     Map Reduce Local Work
@@ -437,7 +438,8 @@ STAGE PLANS:
               name: default.test_table3
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-8
     Map Reduce Local Work

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/bucketsortoptimize_insert_8.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/bucketsortoptimize_insert_8.q.out b/ql/src/test/results/clientpositive/bucketsortoptimize_insert_8.q.out
index 1b3d741..7f56cf3 100644
--- a/ql/src/test/results/clientpositive/bucketsortoptimize_insert_8.q.out
+++ b/ql/src/test/results/clientpositive/bucketsortoptimize_insert_8.q.out
@@ -156,7 +156,8 @@ STAGE PLANS:
               name: default.test_table3
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-8
     Map Reduce Local Work
@@ -435,7 +436,8 @@ STAGE PLANS:
               name: default.test_table3
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-8
     Map Reduce Local Work

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/case_sensitivity.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/case_sensitivity.q.out b/ql/src/test/results/clientpositive/case_sensitivity.q.out
index b3969cc..c2f2f8b 100644
--- a/ql/src/test/results/clientpositive/case_sensitivity.q.out
+++ b/ql/src/test/results/clientpositive/case_sensitivity.q.out
@@ -67,7 +67,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-3
     Map Reduce

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/cast1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/cast1.q.out b/ql/src/test/results/clientpositive/cast1.q.out
index 9feb14f..a0cd142 100644
--- a/ql/src/test/results/clientpositive/cast1.q.out
+++ b/ql/src/test/results/clientpositive/cast1.q.out
@@ -65,7 +65,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-3
     Map Reduce

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/cbo_rp_annotate_stats_groupby.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/cbo_rp_annotate_stats_groupby.q.out b/ql/src/test/results/clientpositive/cbo_rp_annotate_stats_groupby.q.out
index a4c9a6a..eac491e 100644
--- a/ql/src/test/results/clientpositive/cbo_rp_annotate_stats_groupby.q.out
+++ b/ql/src/test/results/clientpositive/cbo_rp_annotate_stats_groupby.q.out
@@ -76,10 +76,12 @@ STAGE PLANS:
 PREHOOK: query: analyze table loc_orc compute statistics for columns state
 PREHOOK: type: QUERY
 PREHOOK: Input: default@loc_orc
+PREHOOK: Output: default@loc_orc
 #### A masked pattern was here ####
 POSTHOOK: query: analyze table loc_orc compute statistics for columns state
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@loc_orc
+POSTHOOK: Output: default@loc_orc
 #### A masked pattern was here ####
 PREHOOK: query: explain select a, c, min(b)
 from ( select state as a, locid as b, count(*) as c
@@ -177,10 +179,12 @@ STAGE PLANS:
 PREHOOK: query: analyze table loc_orc compute statistics for columns state,locid,year
 PREHOOK: type: QUERY
 PREHOOK: Input: default@loc_orc
+PREHOOK: Output: default@loc_orc
 #### A masked pattern was here ####
 POSTHOOK: query: analyze table loc_orc compute statistics for columns state,locid,year
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@loc_orc
+POSTHOOK: Output: default@loc_orc
 #### A masked pattern was here ####
 PREHOOK: query: explain select year from loc_orc group by year
 PREHOOK: type: QUERY
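
The cbo_rp_annotate_stats_groupby hunks show the second recurring change in
this commit: ANALYZE ... COMPUTE STATISTICS FOR COLUMNS now registers the
analyzed table as a write entity as well as a read entity, so the execution
hooks print an extra Output: line. A sketch using the loc_orc table from this
test:

  ANALYZE TABLE loc_orc COMPUTE STATISTICS FOR COLUMNS state;
  -- hooks before:              hooks after:
  --   Input: default@loc_orc    Input:  default@loc_orc
  --                              Output: default@loc_orc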

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/cbo_rp_auto_join1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/cbo_rp_auto_join1.q.out b/ql/src/test/results/clientpositive/cbo_rp_auto_join1.q.out
index afc4fd9..7fddda8 100644
--- a/ql/src/test/results/clientpositive/cbo_rp_auto_join1.q.out
+++ b/ql/src/test/results/clientpositive/cbo_rp_auto_join1.q.out
@@ -49,10 +49,12 @@ POSTHOOK: Output: default@tbl1
 PREHOOK: query: analyze table tbl1 compute statistics for columns
 PREHOOK: type: QUERY
 PREHOOK: Input: default@tbl1
+PREHOOK: Output: default@tbl1
 #### A masked pattern was here ####
 POSTHOOK: query: analyze table tbl1 compute statistics for columns
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@tbl1
+POSTHOOK: Output: default@tbl1
 #### A masked pattern was here ####
 PREHOOK: query: analyze table tbl2 compute statistics
 PREHOOK: type: QUERY
@@ -65,10 +67,12 @@ POSTHOOK: Output: default@tbl2
 PREHOOK: query: analyze table tbl2 compute statistics for columns
 PREHOOK: type: QUERY
 PREHOOK: Input: default@tbl2
+PREHOOK: Output: default@tbl2
 #### A masked pattern was here ####
 POSTHOOK: query: analyze table tbl2 compute statistics for columns
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@tbl2
+POSTHOOK: Output: default@tbl2
 #### A masked pattern was here ####
 PREHOOK: query: explain
 select count(*) from (

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/cbo_rp_auto_join17.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/cbo_rp_auto_join17.q.out b/ql/src/test/results/clientpositive/cbo_rp_auto_join17.q.out
index b296280..d3747af 100644
--- a/ql/src/test/results/clientpositive/cbo_rp_auto_join17.q.out
+++ b/ql/src/test/results/clientpositive/cbo_rp_auto_join17.q.out
@@ -91,7 +91,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM src src1 JOIN src src2 ON (src1.key = src2.key)
 INSERT OVERWRITE TABLE dest1 SELECT src1.*, src2.*

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/cbo_rp_gby2_map_multi_distinct.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/cbo_rp_gby2_map_multi_distinct.q.out b/ql/src/test/results/clientpositive/cbo_rp_gby2_map_multi_distinct.q.out
index d4d70bc..e5d4421 100644
--- a/ql/src/test/results/clientpositive/cbo_rp_gby2_map_multi_distinct.q.out
+++ b/ql/src/test/results/clientpositive/cbo_rp_gby2_map_multi_distinct.q.out
@@ -77,7 +77,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM src
 INSERT OVERWRITE TABLE dest1
@@ -187,7 +188,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM src
 INSERT OVERWRITE TABLE dest1

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/cbo_rp_groupby3_noskew_multi_distinct.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/cbo_rp_groupby3_noskew_multi_distinct.q.out b/ql/src/test/results/clientpositive/cbo_rp_groupby3_noskew_multi_distinct.q.out
index c09764c..177eefd 100644
--- a/ql/src/test/results/clientpositive/cbo_rp_groupby3_noskew_multi_distinct.q.out
+++ b/ql/src/test/results/clientpositive/cbo_rp_groupby3_noskew_multi_distinct.q.out
@@ -86,7 +86,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM src
 INSERT OVERWRITE TABLE dest1 SELECT

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/colstats_all_nulls.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/colstats_all_nulls.q.out b/ql/src/test/results/clientpositive/colstats_all_nulls.q.out
index b56ad7f..77e4bf9 100644
--- a/ql/src/test/results/clientpositive/colstats_all_nulls.q.out
+++ b/ql/src/test/results/clientpositive/colstats_all_nulls.q.out
@@ -30,10 +30,12 @@ POSTHOOK: Lineage: all_nulls.c SIMPLE []
 PREHOOK: query: analyze table all_nulls compute statistics for columns
 PREHOOK: type: QUERY
 PREHOOK: Input: default@all_nulls
+PREHOOK: Output: default@all_nulls
 #### A masked pattern was here ####
 POSTHOOK: query: analyze table all_nulls compute statistics for columns
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@all_nulls
+POSTHOOK: Output: default@all_nulls
 #### A masked pattern was here ####
 PREHOOK: query: describe formatted all_nulls a
 PREHOOK: type: DESCTABLE

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/columnStatsUpdateForStatsOptimizer_2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/columnStatsUpdateForStatsOptimizer_2.q.out b/ql/src/test/results/clientpositive/columnStatsUpdateForStatsOptimizer_2.q.out
index 0d78ea2..ee0da96 100644
--- a/ql/src/test/results/clientpositive/columnStatsUpdateForStatsOptimizer_2.q.out
+++ b/ql/src/test/results/clientpositive/columnStatsUpdateForStatsOptimizer_2.q.out
@@ -55,10 +55,12 @@ Storage Desc Params:
 PREHOOK: query: analyze table calendar compute statistics for columns year
 PREHOOK: type: QUERY
 PREHOOK: Input: default@calendar
+PREHOOK: Output: default@calendar
 #### A masked pattern was here ####
 POSTHOOK: query: analyze table calendar compute statistics for columns year
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@calendar
+POSTHOOK: Output: default@calendar
 #### A masked pattern was here ####
 PREHOOK: query: desc formatted calendar
 PREHOOK: type: DESCTABLE

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/column_pruner_multiple_children.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/column_pruner_multiple_children.q.out b/ql/src/test/results/clientpositive/column_pruner_multiple_children.q.out
index 66ff79f..a347b01 100644
--- a/ql/src/test/results/clientpositive/column_pruner_multiple_children.q.out
+++ b/ql/src/test/results/clientpositive/column_pruner_multiple_children.q.out
@@ -27,8 +27,7 @@ POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
-  Stage-2 depends on stages: Stage-0
-  Stage-4 depends on stages: Stage-2, Stage-3
+  Stage-2 depends on stages: Stage-0, Stage-3
   Stage-3 depends on stages: Stage-1
 
 STAGE PLANS:
@@ -89,10 +88,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-2
-    Stats-Aggr Operator
-
-  Stage: Stage-4
-    Column Stats Work
+    Stats Work
+      Basic Stats Work:
       Column Stats Desc:
           Columns: key, value
           Column Types: int, string

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/columnarserde_create_shortcut.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/columnarserde_create_shortcut.q.out b/ql/src/test/results/clientpositive/columnarserde_create_shortcut.q.out
index 018e18f..feac97e 100644
--- a/ql/src/test/results/clientpositive/columnarserde_create_shortcut.q.out
+++ b/ql/src/test/results/clientpositive/columnarserde_create_shortcut.q.out
@@ -60,7 +60,8 @@ STAGE PLANS:
               name: default.columnarserde_create_shortcut
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM src_thrift
 INSERT OVERWRITE TABLE columnarserde_create_shortcut SELECT src_thrift.lint, src_thrift.lstring, src_thrift.mstringstring, src_thrift.aint, src_thrift.astring DISTRIBUTE BY 1

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/columnstats_infinity.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/columnstats_infinity.q.out b/ql/src/test/results/clientpositive/columnstats_infinity.q.out
index 777345c..6b31882 100644
--- a/ql/src/test/results/clientpositive/columnstats_infinity.q.out
+++ b/ql/src/test/results/clientpositive/columnstats_infinity.q.out
@@ -117,10 +117,12 @@ Storage Desc Params:
 PREHOOK: query: analyze table table_change_numeric_group_string_group_floating_string_group compute statistics for columns
 PREHOOK: type: QUERY
 PREHOOK: Input: default@table_change_numeric_group_string_group_floating_string_group
+PREHOOK: Output: default@table_change_numeric_group_string_group_floating_string_group
 #### A masked pattern was here ####
 POSTHOOK: query: analyze table table_change_numeric_group_string_group_floating_string_group compute statistics for columns
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@table_change_numeric_group_string_group_floating_string_group
+POSTHOOK: Output: default@table_change_numeric_group_string_group_floating_string_group
 #### A masked pattern was here ####
 PREHOOK: query: desc formatted table_change_numeric_group_string_group_floating_string_group
 PREHOOK: type: DESCTABLE
@@ -273,7 +275,7 @@ Retention:          	0
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
 Table Parameters:	 	 
-	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"b\":\"true\",\"c1\":\"true\",\"c10\":\"true\",\"c12\":\"true\",\"c13\":\"true\",\"c15\":\"true\",\"c3\":\"true\",\"c4\":\"true\",\"c6\":\"true\",\"c7\":\"true\",\"c9\":\"true\",\"insert_num\":\"true\"}}
+	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"b\":\"true\",\"c1\":\"true\",\"c10\":\"true\",\"c11\":\"true\",\"c12\":\"true\",\"c13\":\"true\",\"c14\":\"true\",\"c15\":\"true\",\"c2\":\"true\",\"c3\":\"true\",\"c4\":\"true\",\"c5\":\"true\",\"c6\":\"true\",\"c7\":\"true\",\"c8\":\"true\",\"c9\":\"true\",\"insert_num\":\"true\"}}
 	numFiles            	1                   
 	numRows             	5                   
 	rawDataSize         	1250                

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/columnstats_partlvl.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/columnstats_partlvl.q.out b/ql/src/test/results/clientpositive/columnstats_partlvl.q.out
index 08f1709..13b9608 100644
--- a/ql/src/test/results/clientpositive/columnstats_partlvl.q.out
+++ b/ql/src/test/results/clientpositive/columnstats_partlvl.q.out
@@ -83,7 +83,8 @@ STAGE PLANS:
                   serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
 
   Stage: Stage-1
-    Column Stats Work
+    Stats Work
+      Basic Stats Work:
       Column Stats Desc:
           Columns: employeeID
           Column Types: int
@@ -106,7 +107,8 @@ STAGE PLANS:
           TableScan
             alias: employee_part
             Statistics: Num rows: 26 Data size: 105 Basic stats: COMPLETE Column stats: NONE
-            GatherStats: false
+            Statistics Aggregation Key Prefix: default.employee_part/
+            GatherStats: true
             Select Operator
               expressions: employeeid (type: int)
               outputColumnNames: employeeid
@@ -215,7 +217,9 @@ STAGE PLANS:
               MultiFileSpray: false
 
   Stage: Stage-1
-    Column Stats Work
+    Stats Work
+      Basic Stats Work:
+          Stats Aggregation Key Prefix: default.employee_part/
       Column Stats Desc:
           Columns: employeeID
           Column Types: int
@@ -226,12 +230,54 @@ PREHOOK: query: analyze table Employee_Part partition (employeeSalary=2000.0) co
 PREHOOK: type: QUERY
 PREHOOK: Input: default@employee_part
 PREHOOK: Input: default@employee_part@employeesalary=2000.0
+PREHOOK: Output: default@employee_part
+PREHOOK: Output: default@employee_part@employeesalary=2000.0
 #### A masked pattern was here ####
 POSTHOOK: query: analyze table Employee_Part partition (employeeSalary=2000.0) compute statistics for columns employeeID
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@employee_part
 POSTHOOK: Input: default@employee_part@employeesalary=2000.0
+POSTHOOK: Output: default@employee_part
+POSTHOOK: Output: default@employee_part@employeesalary=2000.0
 #### A masked pattern was here ####
+PREHOOK: query: describe formatted Employee_Part partition(employeeSalary=2000.0)
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@employee_part
+POSTHOOK: query: describe formatted Employee_Part partition(employeeSalary=2000.0)
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@employee_part
+# col_name            	data_type           	comment             
+employeeid          	int                 	                    
+employeename        	string              	                    
+	 	 
+# Partition Information	 	 
+# col_name            	data_type           	comment             
+employeesalary      	double              	                    
+	 	 
+# Detailed Partition Information	 	 
+Partition Value:    	[2000.0]            	 
+Database:           	default             	 
+Table:              	employee_part       	 
+#### A masked pattern was here ####
+Partition Parameters:	 	 
+	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"employeeid\":\"true\"}}
+	numFiles            	1                   
+	numRows             	13                  
+	rawDataSize         	92                  
+	totalSize           	105                 
+#### A masked pattern was here ####
+	 	 
+# Storage Information	 	 
+SerDe Library:      	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe	 
+InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
+OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
+Compressed:         	No                  	 
+Num Buckets:        	-1                  	 
+Bucket Columns:     	[]                  	 
+Sort Columns:       	[]                  	 
+Storage Desc Params:	 	 
+	field.delim         	|                   
+	serialization.format	|                   
 PREHOOK: query: explain 
 analyze table Employee_Part partition (employeeSalary=4000.0) compute statistics for columns employeeID
 PREHOOK: type: QUERY
@@ -285,7 +331,8 @@ STAGE PLANS:
                   serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
 
   Stage: Stage-1
-    Column Stats Work
+    Stats Work
+      Basic Stats Work:
       Column Stats Desc:
           Columns: employeeID
           Column Types: int
@@ -308,7 +355,8 @@ STAGE PLANS:
           TableScan
             alias: employee_part
             Statistics: Num rows: 26 Data size: 105 Basic stats: COMPLETE Column stats: NONE
-            GatherStats: false
+            Statistics Aggregation Key Prefix: default.employee_part/
+            GatherStats: true
             Select Operator
               expressions: employeeid (type: int)
               outputColumnNames: employeeid
@@ -417,7 +465,9 @@ STAGE PLANS:
               MultiFileSpray: false
 
   Stage: Stage-1
-    Column Stats Work
+    Stats Work
+      Basic Stats Work:
+          Stats Aggregation Key Prefix: default.employee_part/
       Column Stats Desc:
           Columns: employeeID
           Column Types: int
@@ -428,11 +478,15 @@ PREHOOK: query: analyze table Employee_Part partition (employeeSalary=4000.0) co
 PREHOOK: type: QUERY
 PREHOOK: Input: default@employee_part
 PREHOOK: Input: default@employee_part@employeesalary=4000.0
+PREHOOK: Output: default@employee_part
+PREHOOK: Output: default@employee_part@employeesalary=4000.0
 #### A masked pattern was here ####
 POSTHOOK: query: analyze table Employee_Part partition (employeeSalary=4000.0) compute statistics for columns employeeID
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@employee_part
 POSTHOOK: Input: default@employee_part@employeesalary=4000.0
+POSTHOOK: Output: default@employee_part
+POSTHOOK: Output: default@employee_part@employeesalary=4000.0
 #### A masked pattern was here ####
 PREHOOK: query: explain 
 analyze table Employee_Part partition (employeeSalary=2000.0) compute statistics for columns
@@ -450,22 +504,22 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: employee_part
-            Statistics: Num rows: 1 Data size: 105 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 13 Data size: 92 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: employeeid (type: int), employeename (type: string)
               outputColumnNames: employeeid, employeename
-              Statistics: Num rows: 1 Data size: 105 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 13 Data size: 92 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
                 aggregations: compute_stats(employeeid, 'hll'), compute_stats(employeename, 'hll')
                 keys: 2000.0 (type: double)
                 mode: hash
                 outputColumnNames: _col0, _col1, _col2
-                Statistics: Num rows: 1 Data size: 105 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 13 Data size: 92 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
                   key expressions: 2000.0 (type: double)
                   sort order: +
                   Map-reduce partition columns: 2000.0 (type: double)
-                  Statistics: Num rows: 1 Data size: 105 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 13 Data size: 92 Basic stats: COMPLETE Column stats: NONE
                   value expressions: _col1 (type: struct<columntype:string,min:bigint,max:bigint,countnulls:bigint,bitvector:binary>), _col2 (type: struct<columntype:string,maxlength:bigint,sumlength:bigint,count:bigint,countnulls:bigint,bitvector:binary>)
       Reduce Operator Tree:
         Group By Operator
@@ -473,21 +527,22 @@ STAGE PLANS:
           keys: 2000.0 (type: double)
           mode: mergepartial
           outputColumnNames: _col0, _col1, _col2
-          Statistics: Num rows: 1 Data size: 105 Basic stats: COMPLETE Column stats: NONE
+          Statistics: Num rows: 6 Data size: 42 Basic stats: COMPLETE Column stats: NONE
           Select Operator
             expressions: _col1 (type: struct<columntype:string,min:bigint,max:bigint,countnulls:bigint,numdistinctvalues:bigint,ndvbitvector:binary>), _col2 (type: struct<columntype:string,maxlength:bigint,avglength:double,countnulls:bigint,numdistinctvalues:bigint,ndvbitvector:binary>), 2000.0 (type: double)
             outputColumnNames: _col0, _col1, _col2
-            Statistics: Num rows: 1 Data size: 105 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 6 Data size: 42 Basic stats: COMPLETE Column stats: NONE
             File Output Operator
               compressed: false
-              Statistics: Num rows: 1 Data size: 105 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 6 Data size: 42 Basic stats: COMPLETE Column stats: NONE
               table:
                   input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
                   serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
 
   Stage: Stage-1
-    Column Stats Work
+    Stats Work
+      Basic Stats Work:
       Column Stats Desc:
           Columns: employeeid, employeename
           Column Types: int, string
@@ -497,11 +552,15 @@ PREHOOK: query: analyze table Employee_Part partition (employeeSalary=2000.0) co
 PREHOOK: type: QUERY
 PREHOOK: Input: default@employee_part
 PREHOOK: Input: default@employee_part@employeesalary=2000.0
+PREHOOK: Output: default@employee_part
+PREHOOK: Output: default@employee_part@employeesalary=2000.0
 #### A masked pattern was here ####
 POSTHOOK: query: analyze table Employee_Part partition (employeeSalary=2000.0) compute statistics for columns
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@employee_part
 POSTHOOK: Input: default@employee_part@employeesalary=2000.0
+POSTHOOK: Output: default@employee_part
+POSTHOOK: Output: default@employee_part@employeesalary=2000.0
 #### A masked pattern was here ####
 PREHOOK: query: describe formatted Employee_Part partition (employeeSalary=2000.0) employeeID
 PREHOOK: type: DESCTABLE
@@ -555,22 +614,22 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: employee_part
-            Statistics: Num rows: 2 Data size: 210 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 26 Data size: 184 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: employeeid (type: int), employeename (type: string), employeesalary (type: double)
               outputColumnNames: employeeid, employeename, employeesalary
-              Statistics: Num rows: 2 Data size: 210 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 26 Data size: 184 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
                 aggregations: compute_stats(employeeid, 'hll'), compute_stats(employeename, 'hll')
                 keys: employeesalary (type: double)
                 mode: hash
                 outputColumnNames: _col0, _col1, _col2
-                Statistics: Num rows: 2 Data size: 210 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 26 Data size: 184 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
                   key expressions: _col0 (type: double)
                   sort order: +
                   Map-reduce partition columns: _col0 (type: double)
-                  Statistics: Num rows: 2 Data size: 210 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 26 Data size: 184 Basic stats: COMPLETE Column stats: NONE
                   value expressions: _col1 (type: struct<columntype:string,min:bigint,max:bigint,countnulls:bigint,bitvector:binary>), _col2 (type: struct<columntype:string,maxlength:bigint,sumlength:bigint,count:bigint,countnulls:bigint,bitvector:binary>)
       Reduce Operator Tree:
         Group By Operator
@@ -578,21 +637,22 @@ STAGE PLANS:
           keys: KEY._col0 (type: double)
           mode: mergepartial
           outputColumnNames: _col0, _col1, _col2
-          Statistics: Num rows: 1 Data size: 105 Basic stats: COMPLETE Column stats: NONE
+          Statistics: Num rows: 13 Data size: 92 Basic stats: COMPLETE Column stats: NONE
           Select Operator
             expressions: _col1 (type: struct<columntype:string,min:bigint,max:bigint,countnulls:bigint,numdistinctvalues:bigint,ndvbitvector:binary>), _col2 (type: struct<columntype:string,maxlength:bigint,avglength:double,countnulls:bigint,numdistinctvalues:bigint,ndvbitvector:binary>), _col0 (type: double)
             outputColumnNames: _col0, _col1, _col2
-            Statistics: Num rows: 1 Data size: 105 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 13 Data size: 92 Basic stats: COMPLETE Column stats: NONE
             File Output Operator
               compressed: false
-              Statistics: Num rows: 1 Data size: 105 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 13 Data size: 92 Basic stats: COMPLETE Column stats: NONE
               table:
                   input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
                   serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
 
   Stage: Stage-1
-    Column Stats Work
+    Stats Work
+      Basic Stats Work:
       Column Stats Desc:
           Columns: employeeid, employeename
           Column Types: int, string
@@ -603,12 +663,18 @@ PREHOOK: type: QUERY
 PREHOOK: Input: default@employee_part
 PREHOOK: Input: default@employee_part@employeesalary=2000.0
 PREHOOK: Input: default@employee_part@employeesalary=4000.0
+PREHOOK: Output: default@employee_part
+PREHOOK: Output: default@employee_part@employeesalary=2000.0
+PREHOOK: Output: default@employee_part@employeesalary=4000.0
 #### A masked pattern was here ####
 POSTHOOK: query: analyze table Employee_Part  compute statistics for columns
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@employee_part
 POSTHOOK: Input: default@employee_part@employeesalary=2000.0
 POSTHOOK: Input: default@employee_part@employeesalary=4000.0
+POSTHOOK: Output: default@employee_part
+POSTHOOK: Output: default@employee_part@employeesalary=2000.0
+POSTHOOK: Output: default@employee_part@employeesalary=4000.0
 #### A masked pattern was here ####
 PREHOOK: query: describe formatted Employee_Part partition(employeeSalary=2000.0) employeeID
 PREHOOK: type: DESCTABLE
@@ -662,11 +728,11 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: employee_part
-            Statistics: Num rows: 2 Data size: 210 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 26 Data size: 184 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: employeeid (type: int), employeename (type: string)
               outputColumnNames: employeeid, employeename
-              Statistics: Num rows: 2 Data size: 210 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 26 Data size: 184 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
                 aggregations: compute_stats(employeeid, 'hll'), compute_stats(employeename, 'hll')
                 mode: hash
@@ -691,7 +757,8 @@ STAGE PLANS:
                 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
 
   Stage: Stage-1
-    Column Stats Work
+    Stats Work
+      Basic Stats Work:
       Column Stats Desc:
           Columns: employeeid, employeename
           Column Types: int, string
@@ -702,12 +769,18 @@ PREHOOK: type: QUERY
 PREHOOK: Input: default@employee_part
 PREHOOK: Input: default@employee_part@employeesalary=2000.0
 PREHOOK: Input: default@employee_part@employeesalary=4000.0
+PREHOOK: Output: default@employee_part
+PREHOOK: Output: default@employee_part@employeesalary=2000.0
+PREHOOK: Output: default@employee_part@employeesalary=4000.0
 #### A masked pattern was here ####
 POSTHOOK: query: analyze table Employee_Part  compute statistics for columns
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@employee_part
 POSTHOOK: Input: default@employee_part@employeesalary=2000.0
 POSTHOOK: Input: default@employee_part@employeesalary=4000.0
+POSTHOOK: Output: default@employee_part
+POSTHOOK: Output: default@employee_part@employeesalary=2000.0
+POSTHOOK: Output: default@employee_part@employeesalary=4000.0
 #### A masked pattern was here ####
 PREHOOK: query: describe formatted Employee_Part employeeID
 PREHOOK: type: DESCTABLE
@@ -744,11 +817,15 @@ PREHOOK: query: analyze table default.Employee_Part partition (employeeSalary=20
 PREHOOK: type: QUERY
 PREHOOK: Input: default@employee_part
 PREHOOK: Input: default@employee_part@employeesalary=2000.0
+PREHOOK: Output: default@employee_part
+PREHOOK: Output: default@employee_part@employeesalary=2000.0
 #### A masked pattern was here ####
 POSTHOOK: query: analyze table default.Employee_Part partition (employeeSalary=2000.0) compute statistics for columns
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@employee_part
 POSTHOOK: Input: default@employee_part@employeesalary=2000.0
+POSTHOOK: Output: default@employee_part
+POSTHOOK: Output: default@employee_part@employeesalary=2000.0
 #### A masked pattern was here ####
 PREHOOK: query: describe formatted default.Employee_Part partition (employeeSalary=2000.0) employeeID
 PREHOOK: type: DESCTABLE
@@ -774,12 +851,18 @@ PREHOOK: type: QUERY
 PREHOOK: Input: default@employee_part
 PREHOOK: Input: default@employee_part@employeesalary=2000.0
 PREHOOK: Input: default@employee_part@employeesalary=4000.0
+PREHOOK: Output: default@employee_part
+PREHOOK: Output: default@employee_part@employeesalary=2000.0
+PREHOOK: Output: default@employee_part@employeesalary=4000.0
 #### A masked pattern was here ####
 POSTHOOK: query: analyze table default.Employee_Part  compute statistics for columns
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@employee_part
 POSTHOOK: Input: default@employee_part@employeesalary=2000.0
 POSTHOOK: Input: default@employee_part@employeesalary=4000.0
+POSTHOOK: Output: default@employee_part
+POSTHOOK: Output: default@employee_part@employeesalary=2000.0
+POSTHOOK: Output: default@employee_part@employeesalary=4000.0
 #### A masked pattern was here ####
 PREHOOK: query: use default
 PREHOOK: type: SWITCHDATABASE
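
columnstats_partlvl.q.out pulls the pieces together at partition level: the
column-stats TableScan now runs with GatherStats: true and a Statistics
Aggregation Key Prefix, so a single scan feeds both the basic and the column
stats; the analyzed partitions and their table become output entities; and
the later EXPLAINs pick up the gathered per-partition counts (13 rows per
partition instead of the earlier placeholder estimates). A hedged sketch at
partition granularity, using the names from this test:

  ANALYZE TABLE Employee_Part PARTITION (employeeSalary=2000.0)
    COMPUTE STATISTICS FOR COLUMNS employeeID;
  -- one Stats Work stage gathers basic stats from the scan
  -- (prefix default.employee_part/) and persists the column stats;
  -- default@employee_part@employeesalary=2000.0 is now listed as an Output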

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/columnstats_partlvl_dp.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/columnstats_partlvl_dp.q.out b/ql/src/test/results/clientpositive/columnstats_partlvl_dp.q.out
index a626597..414b715 100644
--- a/ql/src/test/results/clientpositive/columnstats_partlvl_dp.q.out
+++ b/ql/src/test/results/clientpositive/columnstats_partlvl_dp.q.out
@@ -119,7 +119,8 @@ STAGE PLANS:
                   serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
 
   Stage: Stage-1
-    Column Stats Work
+    Stats Work
+      Basic Stats Work:
       Column Stats Desc:
           Columns: employeeName, employeeID
           Column Types: string, int
@@ -129,12 +130,55 @@ PREHOOK: query: analyze table Employee_Part partition (employeeSalary='4000.0',
 PREHOOK: type: QUERY
 PREHOOK: Input: default@employee_part
 PREHOOK: Input: default@employee_part@employeesalary=4000.0/country=USA
+PREHOOK: Output: default@employee_part
+PREHOOK: Output: default@employee_part@employeesalary=4000.0/country=USA
 #### A masked pattern was here ####
 POSTHOOK: query: analyze table Employee_Part partition (employeeSalary='4000.0', country) compute statistics for columns employeeName, employeeID
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@employee_part
 POSTHOOK: Input: default@employee_part@employeesalary=4000.0/country=USA
+POSTHOOK: Output: default@employee_part
+POSTHOOK: Output: default@employee_part@employeesalary=4000.0/country=USA
 #### A masked pattern was here ####
+PREHOOK: query: describe formatted Employee_Part partition (employeeSalary='4000.0', country='USA')
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@employee_part
+POSTHOOK: query: describe formatted Employee_Part partition (employeeSalary='4000.0', country='USA')
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@employee_part
+# col_name            	data_type           	comment             
+employeeid          	int                 	                    
+employeename        	string              	                    
+	 	 
+# Partition Information	 	 
+# col_name            	data_type           	comment             
+employeesalary      	double              	                    
+country             	string              	                    
+	 	 
+# Detailed Partition Information	 	 
+Partition Value:    	[4000.0, USA]       	 
+Database:           	default             	 
+Table:              	employee_part       	 
+#### A masked pattern was here ####
+Partition Parameters:	 	 
+	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"employeeid\":\"true\",\"employeename\":\"true\"}}
+	numFiles            	1                   
+	numRows             	7                   
+	rawDataSize         	57                  
+	totalSize           	64                  
+#### A masked pattern was here ####
+	 	 
+# Storage Information	 	 
+SerDe Library:      	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe	 
+InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
+OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
+Compressed:         	No                  	 
+Num Buckets:        	-1                  	 
+Bucket Columns:     	[]                  	 
+Sort Columns:       	[]                  	 
+Storage Desc Params:	 	 
+	field.delim         	|                   
+	serialization.format	|                   
 PREHOOK: query: describe formatted Employee_Part partition (employeeSalary='4000.0', country='USA') employeeName
 PREHOOK: type: DESCTABLE
 PREHOOK: Input: default@employee_part
@@ -206,7 +250,8 @@ STAGE PLANS:
                   serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
 
   Stage: Stage-1
-    Column Stats Work
+    Stats Work
+      Basic Stats Work:
       Column Stats Desc:
           Columns: employeeID
           Column Types: int
@@ -217,12 +262,18 @@ PREHOOK: type: QUERY
 PREHOOK: Input: default@employee_part
 PREHOOK: Input: default@employee_part@employeesalary=2000.0/country=UK
 PREHOOK: Input: default@employee_part@employeesalary=2000.0/country=USA
+PREHOOK: Output: default@employee_part
+PREHOOK: Output: default@employee_part@employeesalary=2000.0/country=UK
+PREHOOK: Output: default@employee_part@employeesalary=2000.0/country=USA
 #### A masked pattern was here ####
 POSTHOOK: query: analyze table Employee_Part partition (employeeSalary='2000.0') compute statistics for columns employeeID
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@employee_part
 POSTHOOK: Input: default@employee_part@employeesalary=2000.0/country=UK
 POSTHOOK: Input: default@employee_part@employeesalary=2000.0/country=USA
+POSTHOOK: Output: default@employee_part
+POSTHOOK: Output: default@employee_part@employeesalary=2000.0/country=UK
+POSTHOOK: Output: default@employee_part@employeesalary=2000.0/country=USA
 #### A masked pattern was here ####
 PREHOOK: query: describe formatted Employee_Part partition (employeeSalary='2000.0', country='USA') employeeID
 PREHOOK: type: DESCTABLE
@@ -276,22 +327,22 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: employee_part
-            Statistics: Num rows: 116 Data size: 466 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 27 Data size: 206 Basic stats: PARTIAL Column stats: NONE
             Select Operator
               expressions: employeeid (type: int), employeesalary (type: double), country (type: string)
               outputColumnNames: employeeid, employeesalary, country
-              Statistics: Num rows: 116 Data size: 466 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 27 Data size: 206 Basic stats: PARTIAL Column stats: NONE
               Group By Operator
                 aggregations: compute_stats(employeeid, 'hll')
                 keys: employeesalary (type: double), country (type: string)
                 mode: hash
                 outputColumnNames: _col0, _col1, _col2
-                Statistics: Num rows: 116 Data size: 466 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 27 Data size: 206 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
                   key expressions: _col0 (type: double), _col1 (type: string)
                   sort order: ++
                   Map-reduce partition columns: _col0 (type: double), _col1 (type: string)
-                  Statistics: Num rows: 116 Data size: 466 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 27 Data size: 206 Basic stats: COMPLETE Column stats: NONE
                   value expressions: _col2 (type: struct<columntype:string,min:bigint,max:bigint,countnulls:bigint,bitvector:binary>)
       Reduce Operator Tree:
         Group By Operator
@@ -299,21 +350,22 @@ STAGE PLANS:
           keys: KEY._col0 (type: double), KEY._col1 (type: string)
           mode: mergepartial
           outputColumnNames: _col0, _col1, _col2
-          Statistics: Num rows: 58 Data size: 233 Basic stats: COMPLETE Column stats: NONE
+          Statistics: Num rows: 13 Data size: 99 Basic stats: COMPLETE Column stats: NONE
           Select Operator
             expressions: _col2 (type: struct<columntype:string,min:bigint,max:bigint,countnulls:bigint,numdistinctvalues:bigint,ndvbitvector:binary>), _col0 (type: double), _col1 (type: string)
             outputColumnNames: _col0, _col1, _col2
-            Statistics: Num rows: 58 Data size: 233 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 13 Data size: 99 Basic stats: COMPLETE Column stats: NONE
             File Output Operator
               compressed: false
-              Statistics: Num rows: 58 Data size: 233 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 13 Data size: 99 Basic stats: COMPLETE Column stats: NONE
               table:
                   input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
                   serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
 
   Stage: Stage-1
-    Column Stats Work
+    Stats Work
+      Basic Stats Work:
       Column Stats Desc:
           Columns: employeeID
           Column Types: int
@@ -328,6 +380,13 @@ PREHOOK: Input: default@employee_part@employeesalary=3000.0/country=UK
 PREHOOK: Input: default@employee_part@employeesalary=3000.0/country=USA
 PREHOOK: Input: default@employee_part@employeesalary=3500.0/country=UK
 PREHOOK: Input: default@employee_part@employeesalary=4000.0/country=USA
+PREHOOK: Output: default@employee_part
+PREHOOK: Output: default@employee_part@employeesalary=2000.0/country=UK
+PREHOOK: Output: default@employee_part@employeesalary=2000.0/country=USA
+PREHOOK: Output: default@employee_part@employeesalary=3000.0/country=UK
+PREHOOK: Output: default@employee_part@employeesalary=3000.0/country=USA
+PREHOOK: Output: default@employee_part@employeesalary=3500.0/country=UK
+PREHOOK: Output: default@employee_part@employeesalary=4000.0/country=USA
 #### A masked pattern was here ####
 POSTHOOK: query: analyze table Employee_Part partition (employeeSalary) compute statistics for columns employeeID
 POSTHOOK: type: QUERY
@@ -338,6 +397,13 @@ POSTHOOK: Input: default@employee_part@employeesalary=3000.0/country=UK
 POSTHOOK: Input: default@employee_part@employeesalary=3000.0/country=USA
 POSTHOOK: Input: default@employee_part@employeesalary=3500.0/country=UK
 POSTHOOK: Input: default@employee_part@employeesalary=4000.0/country=USA
+POSTHOOK: Output: default@employee_part
+POSTHOOK: Output: default@employee_part@employeesalary=2000.0/country=UK
+POSTHOOK: Output: default@employee_part@employeesalary=2000.0/country=USA
+POSTHOOK: Output: default@employee_part@employeesalary=3000.0/country=UK
+POSTHOOK: Output: default@employee_part@employeesalary=3000.0/country=USA
+POSTHOOK: Output: default@employee_part@employeesalary=3500.0/country=UK
+POSTHOOK: Output: default@employee_part@employeesalary=4000.0/country=USA
 #### A masked pattern was here ####
 PREHOOK: query: describe formatted Employee_Part partition (employeeSalary='3000.0', country='UK') employeeID
 PREHOOK: type: DESCTABLE
@@ -373,22 +439,22 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: employee_part
-            Statistics: Num rows: 2 Data size: 466 Basic stats: PARTIAL Column stats: NONE
+            Statistics: Num rows: 54 Data size: 412 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: employeeid (type: int), employeename (type: string), employeesalary (type: double), country (type: string)
               outputColumnNames: employeeid, employeename, employeesalary, country
-              Statistics: Num rows: 2 Data size: 466 Basic stats: PARTIAL Column stats: NONE
+              Statistics: Num rows: 54 Data size: 412 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
                 aggregations: compute_stats(employeeid, 'hll'), compute_stats(employeename, 'hll')
                 keys: employeesalary (type: double), country (type: string)
                 mode: hash
                 outputColumnNames: _col0, _col1, _col2, _col3
-                Statistics: Num rows: 2 Data size: 466 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 54 Data size: 412 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
                   key expressions: _col0 (type: double), _col1 (type: string)
                   sort order: ++
                   Map-reduce partition columns: _col0 (type: double), _col1 (type: string)
-                  Statistics: Num rows: 2 Data size: 466 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 54 Data size: 412 Basic stats: COMPLETE Column stats: NONE
                   value expressions: _col2 (type: struct<columntype:string,min:bigint,max:bigint,countnulls:bigint,bitvector:binary>), _col3 (type: struct<columntype:string,maxlength:bigint,sumlength:bigint,count:bigint,countnulls:bigint,bitvector:binary>)
       Reduce Operator Tree:
         Group By Operator
@@ -396,21 +462,22 @@ STAGE PLANS:
           keys: KEY._col0 (type: double), KEY._col1 (type: string)
           mode: mergepartial
           outputColumnNames: _col0, _col1, _col2, _col3
-          Statistics: Num rows: 1 Data size: 233 Basic stats: COMPLETE Column stats: NONE
+          Statistics: Num rows: 27 Data size: 206 Basic stats: COMPLETE Column stats: NONE
           Select Operator
             expressions: _col2 (type: struct<columntype:string,min:bigint,max:bigint,countnulls:bigint,numdistinctvalues:bigint,ndvbitvector:binary>), _col3 (type: struct<columntype:string,maxlength:bigint,avglength:double,countnulls:bigint,numdistinctvalues:bigint,ndvbitvector:binary>), _col0 (type: double), _col1 (type: string)
             outputColumnNames: _col0, _col1, _col2, _col3
-            Statistics: Num rows: 1 Data size: 233 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 27 Data size: 206 Basic stats: COMPLETE Column stats: NONE
             File Output Operator
               compressed: false
-              Statistics: Num rows: 1 Data size: 233 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 27 Data size: 206 Basic stats: COMPLETE Column stats: NONE
               table:
                   input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
                   serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
 
   Stage: Stage-1
-    Column Stats Work
+    Stats Work
+      Basic Stats Work:
       Column Stats Desc:
           Columns: employeeid, employeename
           Column Types: int, string
@@ -425,6 +492,13 @@ PREHOOK: Input: default@employee_part@employeesalary=3000.0/country=UK
 PREHOOK: Input: default@employee_part@employeesalary=3000.0/country=USA
 PREHOOK: Input: default@employee_part@employeesalary=3500.0/country=UK
 PREHOOK: Input: default@employee_part@employeesalary=4000.0/country=USA
+PREHOOK: Output: default@employee_part
+PREHOOK: Output: default@employee_part@employeesalary=2000.0/country=UK
+PREHOOK: Output: default@employee_part@employeesalary=2000.0/country=USA
+PREHOOK: Output: default@employee_part@employeesalary=3000.0/country=UK
+PREHOOK: Output: default@employee_part@employeesalary=3000.0/country=USA
+PREHOOK: Output: default@employee_part@employeesalary=3500.0/country=UK
+PREHOOK: Output: default@employee_part@employeesalary=4000.0/country=USA
 #### A masked pattern was here ####
 POSTHOOK: query: analyze table Employee_Part partition (employeeSalary,country) compute statistics for columns
 POSTHOOK: type: QUERY
@@ -435,6 +509,13 @@ POSTHOOK: Input: default@employee_part@employeesalary=3000.0/country=UK
 POSTHOOK: Input: default@employee_part@employeesalary=3000.0/country=USA
 POSTHOOK: Input: default@employee_part@employeesalary=3500.0/country=UK
 POSTHOOK: Input: default@employee_part@employeesalary=4000.0/country=USA
+POSTHOOK: Output: default@employee_part
+POSTHOOK: Output: default@employee_part@employeesalary=2000.0/country=UK
+POSTHOOK: Output: default@employee_part@employeesalary=2000.0/country=USA
+POSTHOOK: Output: default@employee_part@employeesalary=3000.0/country=UK
+POSTHOOK: Output: default@employee_part@employeesalary=3000.0/country=USA
+POSTHOOK: Output: default@employee_part@employeesalary=3500.0/country=UK
+POSTHOOK: Output: default@employee_part@employeesalary=4000.0/country=USA
 #### A masked pattern was here ####
 PREHOOK: query: describe formatted Employee_Part partition (employeeSalary='3500.0', country='UK') employeeName
 PREHOOK: type: DESCTABLE
@@ -511,6 +592,11 @@ PREHOOK: Input: default@employee@employeesalary=2000.0/country=UK
 PREHOOK: Input: default@employee@employeesalary=2000.0/country=USA
 PREHOOK: Input: default@employee@employeesalary=3000.0/country=UK
 PREHOOK: Input: default@employee@employeesalary=3500.0/country=UK
+PREHOOK: Output: default@employee
+PREHOOK: Output: default@employee@employeesalary=2000.0/country=UK
+PREHOOK: Output: default@employee@employeesalary=2000.0/country=USA
+PREHOOK: Output: default@employee@employeesalary=3000.0/country=UK
+PREHOOK: Output: default@employee@employeesalary=3500.0/country=UK
 #### A masked pattern was here ####
 POSTHOOK: query: analyze table Employee partition (employeeSalary,country) compute statistics for columns
 POSTHOOK: type: QUERY
@@ -519,6 +605,11 @@ POSTHOOK: Input: default@employee@employeesalary=2000.0/country=UK
 POSTHOOK: Input: default@employee@employeesalary=2000.0/country=USA
 POSTHOOK: Input: default@employee@employeesalary=3000.0/country=UK
 POSTHOOK: Input: default@employee@employeesalary=3500.0/country=UK
+POSTHOOK: Output: default@employee
+POSTHOOK: Output: default@employee@employeesalary=2000.0/country=UK
+POSTHOOK: Output: default@employee@employeesalary=2000.0/country=USA
+POSTHOOK: Output: default@employee@employeesalary=3000.0/country=UK
+POSTHOOK: Output: default@employee@employeesalary=3500.0/country=UK
 #### A masked pattern was here ####
 PREHOOK: query: describe formatted Employee partition (employeeSalary='3500.0', country='UK') employeeName
 PREHOOK: type: DESCTABLE
@@ -565,6 +656,13 @@ PREHOOK: Input: default@employee@employeesalary=3000.0/country=UK
 PREHOOK: Input: default@employee@employeesalary=3000.0/country=USA
 PREHOOK: Input: default@employee@employeesalary=3500.0/country=UK
 PREHOOK: Input: default@employee@employeesalary=4000.0/country=USA
+PREHOOK: Output: default@employee
+PREHOOK: Output: default@employee@employeesalary=2000.0/country=UK
+PREHOOK: Output: default@employee@employeesalary=2000.0/country=USA
+PREHOOK: Output: default@employee@employeesalary=3000.0/country=UK
+PREHOOK: Output: default@employee@employeesalary=3000.0/country=USA
+PREHOOK: Output: default@employee@employeesalary=3500.0/country=UK
+PREHOOK: Output: default@employee@employeesalary=4000.0/country=USA
 #### A masked pattern was here ####
 POSTHOOK: query: analyze table Employee partition (employeeSalary) compute statistics for columns
 POSTHOOK: type: QUERY
@@ -575,6 +673,13 @@ POSTHOOK: Input: default@employee@employeesalary=3000.0/country=UK
 POSTHOOK: Input: default@employee@employeesalary=3000.0/country=USA
 POSTHOOK: Input: default@employee@employeesalary=3500.0/country=UK
 POSTHOOK: Input: default@employee@employeesalary=4000.0/country=USA
+POSTHOOK: Output: default@employee
+POSTHOOK: Output: default@employee@employeesalary=2000.0/country=UK
+POSTHOOK: Output: default@employee@employeesalary=2000.0/country=USA
+POSTHOOK: Output: default@employee@employeesalary=3000.0/country=UK
+POSTHOOK: Output: default@employee@employeesalary=3000.0/country=USA
+POSTHOOK: Output: default@employee@employeesalary=3500.0/country=UK
+POSTHOOK: Output: default@employee@employeesalary=4000.0/country=USA
 #### A masked pattern was here ####
 PREHOOK: query: describe formatted Employee partition (employeeSalary='3000.0', country='USA') employeeName
 PREHOOK: type: DESCTABLE
@@ -615,11 +720,15 @@ PREHOOK: query: analyze table Employee partition (employeeSalary='6000.0',countr
 PREHOOK: type: QUERY
 PREHOOK: Input: default@employee
 PREHOOK: Input: default@employee@employeesalary=6000.0/country=UK
+PREHOOK: Output: default@employee
+PREHOOK: Output: default@employee@employeesalary=6000.0/country=UK
 #### A masked pattern was here ####
 POSTHOOK: query: analyze table Employee partition (employeeSalary='6000.0',country='UK') compute statistics for columns
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@employee
 POSTHOOK: Input: default@employee@employeesalary=6000.0/country=UK
+POSTHOOK: Output: default@employee
+POSTHOOK: Output: default@employee@employeesalary=6000.0/country=UK
 #### A masked pattern was here ####
 PREHOOK: query: describe formatted Employee partition (employeeSalary='6000.0', country='UK') employeeName
 PREHOOK: type: DESCTABLE
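
In the plans above, the former "Column Stats Work" stage is now printed as "Stats Work" with a nested "Basic Stats Work:" section, reflecting the single merged task. The same applies to partial dynamic-partition specs; a sketch using the statements from this golden file:

  -- static salary, dynamic country (from columnstats_partlvl_dp.q)
  explain analyze table Employee_Part partition (employeeSalary='4000.0', country)
    compute statistics for columns employeeName, employeeID;
  -- Stage-1 now reads "Stats Work" / "Basic Stats Work:" instead of
  -- "Column Stats Work"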

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/columnstats_quoting.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/columnstats_quoting.q.out b/ql/src/test/results/clientpositive/columnstats_quoting.q.out
index b17ce0e..683c1e2 100644
--- a/ql/src/test/results/clientpositive/columnstats_quoting.q.out
+++ b/ql/src/test/results/clientpositive/columnstats_quoting.q.out
@@ -53,7 +53,8 @@ STAGE PLANS:
                 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
 
   Stage: Stage-1
-    Column Stats Work
+    Stats Work
+      Basic Stats Work:
       Column Stats Desc:
           Columns: user id, user name
           Column Types: bigint, string
@@ -62,10 +63,12 @@ STAGE PLANS:
 PREHOOK: query: analyze table user_web_events compute statistics for columns
 PREHOOK: type: QUERY
 PREHOOK: Input: default@user_web_events
+PREHOOK: Output: default@user_web_events
 #### A masked pattern was here ####
 POSTHOOK: query: analyze table user_web_events compute statistics for columns
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@user_web_events
+POSTHOOK: Output: default@user_web_events
 #### A masked pattern was here ####
 PREHOOK: query: explain analyze table user_web_events compute statistics for columns `user id`
 PREHOOK: type: QUERY
@@ -110,7 +113,8 @@ STAGE PLANS:
                 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
 
   Stage: Stage-1
-    Column Stats Work
+    Stats Work
+      Basic Stats Work:
       Column Stats Desc:
           Columns: user id
           Column Types: bigint
@@ -119,8 +123,10 @@ STAGE PLANS:
 PREHOOK: query: analyze table user_web_events compute statistics for columns `user id`
 PREHOOK: type: QUERY
 PREHOOK: Input: default@user_web_events
+PREHOOK: Output: default@user_web_events
 #### A masked pattern was here ####
 POSTHOOK: query: analyze table user_web_events compute statistics for columns `user id`
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@user_web_events
+POSTHOOK: Output: default@user_web_events
 #### A masked pattern was here ####
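
Column names that need quoting go through the same path; the statements exercised by columnstats_quoting.q, for reference:

  -- back-quoted identifiers with embedded spaces
  analyze table user_web_events compute statistics for columns `user id`;
  analyze table user_web_events compute statistics for columns;
  -- both now add default@user_web_events to the Output entities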

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/columnstats_tbllvl.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/columnstats_tbllvl.q.out b/ql/src/test/results/clientpositive/columnstats_tbllvl.q.out
index f51ff4c..a2c6ead 100644
--- a/ql/src/test/results/clientpositive/columnstats_tbllvl.q.out
+++ b/ql/src/test/results/clientpositive/columnstats_tbllvl.q.out
@@ -83,7 +83,8 @@ STAGE PLANS:
                 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
 
   Stage: Stage-1
-    Column Stats Work
+    Stats Work
+      Basic Stats Work:
       Column Stats Desc:
           Columns: sourceIP, avgTimeOnSite, adRevenue
           Column Types: string, int, float
@@ -106,7 +107,8 @@ STAGE PLANS:
           TableScan
             alias: uservisits_web_text_none
             Statistics: Num rows: 1 Data size: 7060 Basic stats: COMPLETE Column stats: NONE
-            GatherStats: false
+            Statistics Aggregation Key Prefix: default.uservisits_web_text_none/
+            GatherStats: true
             Select Operator
               expressions: sourceip (type: string), adrevenue (type: float), avgtimeonsite (type: int)
               outputColumnNames: sourceip, adrevenue, avgtimeonsite
@@ -205,7 +207,9 @@ STAGE PLANS:
             MultiFileSpray: false
 
   Stage: Stage-1
-    Column Stats Work
+    Stats Work
+      Basic Stats Work:
+          Stats Aggregation Key Prefix: default.uservisits_web_text_none/
       Column Stats Desc:
           Columns: sourceIP, avgTimeOnSite, adRevenue
           Column Types: string, int, float
@@ -215,10 +219,12 @@ STAGE PLANS:
 PREHOOK: query: analyze table UserVisits_web_text_none compute statistics for columns sourceIP, avgTimeOnSite, adRevenue
 PREHOOK: type: QUERY
 PREHOOK: Input: default@uservisits_web_text_none
+PREHOOK: Output: default@uservisits_web_text_none
 #### A masked pattern was here ####
 POSTHOOK: query: analyze table UserVisits_web_text_none compute statistics for columns sourceIP, avgTimeOnSite, adRevenue
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@uservisits_web_text_none
+POSTHOOK: Output: default@uservisits_web_text_none
 #### A masked pattern was here ####
 PREHOOK: query: explain 
 analyze table default.UserVisits_web_text_none compute statistics for columns
@@ -236,11 +242,11 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: uservisits_web_text_none
-            Statistics: Num rows: 1 Data size: 7060 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 55 Data size: 7005 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: sourceip (type: string), desturl (type: string), visitdate (type: string), adrevenue (type: float), useragent (type: string), ccode (type: string), lcode (type: string), skeyword (type: string), avgtimeonsite (type: int)
               outputColumnNames: sourceip, desturl, visitdate, adrevenue, useragent, ccode, lcode, skeyword, avgtimeonsite
-              Statistics: Num rows: 1 Data size: 7060 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 55 Data size: 7005 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
                 aggregations: compute_stats(sourceip, 'hll'), compute_stats(desturl, 'hll'), compute_stats(visitdate, 'hll'), compute_stats(adrevenue, 'hll'), compute_stats(useragent, 'hll'), compute_stats(ccode, 'hll'), compute_stats(lcode, 'hll'), compute_stats(skeyword, 'hll'), compute_stats(avgtimeonsite, 'hll')
                 mode: hash
@@ -265,7 +271,8 @@ STAGE PLANS:
                 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
 
   Stage: Stage-1
-    Column Stats Work
+    Stats Work
+      Basic Stats Work:
       Column Stats Desc:
           Columns: sourceip, desturl, visitdate, adrevenue, useragent, ccode, lcode, skeyword, avgtimeonsite
           Column Types: string, string, string, float, string, string, string, string, int
@@ -274,10 +281,12 @@ STAGE PLANS:
 PREHOOK: query: analyze table default.UserVisits_web_text_none compute statistics for columns
 PREHOOK: type: QUERY
 PREHOOK: Input: default@uservisits_web_text_none
+PREHOOK: Output: default@uservisits_web_text_none
 #### A masked pattern was here ####
 POSTHOOK: query: analyze table default.UserVisits_web_text_none compute statistics for columns
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@uservisits_web_text_none
+POSTHOOK: Output: default@uservisits_web_text_none
 #### A masked pattern was here ####
 PREHOOK: query: describe formatted UserVisits_web_text_none destURL
 PREHOOK: type: DESCTABLE
@@ -297,7 +306,7 @@ num_trues
 num_falses          	                    	 	 	 	 	 	 	 	 	 	 
 bitVector           	HL                  	 	 	 	 	 	 	 	 	 	 
 comment             	from deserializer   	 	 	 	 	 	 	 	 	 	 
-COLUMN_STATS_ACCURATE	{\"COLUMN_STATS\":{\"adrevenue\":\"true\",\"avgtimeonsite\":\"true\",\"ccode\":\"true\",\"desturl\":\"true\",\"lcode\":\"true\",\"skeyword\":\"true\",\"sourceip\":\"true\",\"useragent\":\"true\",\"visitdate\":\"true\"}}	 	 	 	 	 	 	 	 	 	 
+COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"adrevenue\":\"true\",\"avgtimeonsite\":\"true\",\"ccode\":\"true\",\"desturl\":\"true\",\"lcode\":\"true\",\"skeyword\":\"true\",\"sourceip\":\"true\",\"useragent\":\"true\",\"visitdate\":\"true\"}}	 	 	 	 	 	 	 	 	 	 
 PREHOOK: query: describe formatted UserVisits_web_text_none adRevenue
 PREHOOK: type: DESCTABLE
 PREHOOK: Input: default@uservisits_web_text_none
@@ -316,7 +325,7 @@ num_trues
 num_falses          	                    	 	 	 	 	 	 	 	 	 	 
 bitVector           	HL                  	 	 	 	 	 	 	 	 	 	 
 comment             	from deserializer   	 	 	 	 	 	 	 	 	 	 
-COLUMN_STATS_ACCURATE	{\"COLUMN_STATS\":{\"adrevenue\":\"true\",\"avgtimeonsite\":\"true\",\"ccode\":\"true\",\"desturl\":\"true\",\"lcode\":\"true\",\"skeyword\":\"true\",\"sourceip\":\"true\",\"useragent\":\"true\",\"visitdate\":\"true\"}}	 	 	 	 	 	 	 	 	 	 
+COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"adrevenue\":\"true\",\"avgtimeonsite\":\"true\",\"ccode\":\"true\",\"desturl\":\"true\",\"lcode\":\"true\",\"skeyword\":\"true\",\"sourceip\":\"true\",\"useragent\":\"true\",\"visitdate\":\"true\"}}	 	 	 	 	 	 	 	 	 	 
 PREHOOK: query: describe formatted UserVisits_web_text_none avgTimeOnSite
 PREHOOK: type: DESCTABLE
 PREHOOK: Input: default@uservisits_web_text_none
@@ -335,7 +344,7 @@ num_trues
 num_falses          	                    	 	 	 	 	 	 	 	 	 	 
 bitVector           	HL                  	 	 	 	 	 	 	 	 	 	 
 comment             	from deserializer   	 	 	 	 	 	 	 	 	 	 
-COLUMN_STATS_ACCURATE	{\"COLUMN_STATS\":{\"adrevenue\":\"true\",\"avgtimeonsite\":\"true\",\"ccode\":\"true\",\"desturl\":\"true\",\"lcode\":\"true\",\"skeyword\":\"true\",\"sourceip\":\"true\",\"useragent\":\"true\",\"visitdate\":\"true\"}}	 	 	 	 	 	 	 	 	 	 
+COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"adrevenue\":\"true\",\"avgtimeonsite\":\"true\",\"ccode\":\"true\",\"desturl\":\"true\",\"lcode\":\"true\",\"skeyword\":\"true\",\"sourceip\":\"true\",\"useragent\":\"true\",\"visitdate\":\"true\"}}	 	 	 	 	 	 	 	 	 	 
 PREHOOK: query: CREATE TABLE empty_tab(
    a int,
    b double,
@@ -401,7 +410,8 @@ STAGE PLANS:
                 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
 
   Stage: Stage-1
-    Column Stats Work
+    Stats Work
+      Basic Stats Work:
       Column Stats Desc:
           Columns: a, b, c, d, e
           Column Types: int, double, string, boolean, binary
@@ -410,10 +420,12 @@ STAGE PLANS:
 PREHOOK: query: analyze table empty_tab compute statistics for columns a,b,c,d,e
 PREHOOK: type: QUERY
 PREHOOK: Input: default@empty_tab
+PREHOOK: Output: default@empty_tab
 #### A masked pattern was here ####
 POSTHOOK: query: analyze table empty_tab compute statistics for columns a,b,c,d,e
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@empty_tab
+POSTHOOK: Output: default@empty_tab
 #### A masked pattern was here ####
 PREHOOK: query: create database if not exists dummydb
 PREHOOK: type: CREATEDATABASE
@@ -430,10 +442,12 @@ POSTHOOK: Input: database:dummydb
 PREHOOK: query: analyze table default.UserVisits_web_text_none compute statistics for columns destURL
 PREHOOK: type: QUERY
 PREHOOK: Input: default@uservisits_web_text_none
+PREHOOK: Output: default@uservisits_web_text_none
 #### A masked pattern was here ####
 POSTHOOK: query: analyze table default.UserVisits_web_text_none compute statistics for columns destURL
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@uservisits_web_text_none
+POSTHOOK: Output: default@uservisits_web_text_none
 #### A masked pattern was here ####
 PREHOOK: query: describe formatted default.UserVisits_web_text_none destURL
 PREHOOK: type: DESCTABLE
@@ -453,7 +467,7 @@ num_trues
 num_falses          	                    	 	 	 	 	 	 	 	 	 	 
 bitVector           	HL                  	 	 	 	 	 	 	 	 	 	 
 comment             	from deserializer   	 	 	 	 	 	 	 	 	 	 
-COLUMN_STATS_ACCURATE	{\"COLUMN_STATS\":{\"adrevenue\":\"true\",\"avgtimeonsite\":\"true\",\"ccode\":\"true\",\"desturl\":\"true\",\"lcode\":\"true\",\"skeyword\":\"true\",\"sourceip\":\"true\",\"useragent\":\"true\",\"visitdate\":\"true\"}}	 	 	 	 	 	 	 	 	 	 
+COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"adrevenue\":\"true\",\"avgtimeonsite\":\"true\",\"ccode\":\"true\",\"desturl\":\"true\",\"lcode\":\"true\",\"skeyword\":\"true\",\"sourceip\":\"true\",\"useragent\":\"true\",\"visitdate\":\"true\"}}	 	 	 	 	 	 	 	 	 	 
 PREHOOK: query: CREATE TABLE UserVisits_in_dummy_db (
   sourceIP string,
   destURL string,
@@ -541,7 +555,8 @@ STAGE PLANS:
                 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
 
   Stage: Stage-1
-    Column Stats Work
+    Stats Work
+      Basic Stats Work:
       Column Stats Desc:
           Columns: sourceIP, avgTimeOnSite, adRevenue
           Column Types: string, int, float
@@ -564,7 +579,8 @@ STAGE PLANS:
           TableScan
             alias: uservisits_in_dummy_db
             Statistics: Num rows: 1 Data size: 7060 Basic stats: COMPLETE Column stats: NONE
-            GatherStats: false
+            Statistics Aggregation Key Prefix: dummydb.uservisits_in_dummy_db/
+            GatherStats: true
             Select Operator
               expressions: sourceip (type: string), adrevenue (type: float), avgtimeonsite (type: int)
               outputColumnNames: sourceip, adrevenue, avgtimeonsite
@@ -663,7 +679,9 @@ STAGE PLANS:
             MultiFileSpray: false
 
   Stage: Stage-1
-    Column Stats Work
+    Stats Work
+      Basic Stats Work:
+          Stats Aggregation Key Prefix: dummydb.uservisits_in_dummy_db/
       Column Stats Desc:
           Columns: sourceIP, avgTimeOnSite, adRevenue
           Column Types: string, int, float
@@ -673,10 +691,12 @@ STAGE PLANS:
 PREHOOK: query: analyze table dummydb.UserVisits_in_dummy_db compute statistics for columns sourceIP, avgTimeOnSite, adRevenue
 PREHOOK: type: QUERY
 PREHOOK: Input: dummydb@uservisits_in_dummy_db
+PREHOOK: Output: dummydb@uservisits_in_dummy_db
 #### A masked pattern was here ####
 POSTHOOK: query: analyze table dummydb.UserVisits_in_dummy_db compute statistics for columns sourceIP, avgTimeOnSite, adRevenue
 POSTHOOK: type: QUERY
 POSTHOOK: Input: dummydb@uservisits_in_dummy_db
+POSTHOOK: Output: dummydb@uservisits_in_dummy_db
 #### A masked pattern was here ####
 PREHOOK: query: explain 
 analyze table dummydb.UserVisits_in_dummy_db compute statistics for columns
@@ -694,11 +714,11 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: uservisits_in_dummy_db
-            Statistics: Num rows: 1 Data size: 7060 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 55 Data size: 7005 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: sourceip (type: string), desturl (type: string), visitdate (type: string), adrevenue (type: float), useragent (type: string), ccode (type: string), lcode (type: string), skeyword (type: string), avgtimeonsite (type: int)
               outputColumnNames: sourceip, desturl, visitdate, adrevenue, useragent, ccode, lcode, skeyword, avgtimeonsite
-              Statistics: Num rows: 1 Data size: 7060 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 55 Data size: 7005 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
                 aggregations: compute_stats(sourceip, 'hll'), compute_stats(desturl, 'hll'), compute_stats(visitdate, 'hll'), compute_stats(adrevenue, 'hll'), compute_stats(useragent, 'hll'), compute_stats(ccode, 'hll'), compute_stats(lcode, 'hll'), compute_stats(skeyword, 'hll'), compute_stats(avgtimeonsite, 'hll')
                 mode: hash
@@ -723,7 +743,8 @@ STAGE PLANS:
                 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
 
   Stage: Stage-1
-    Column Stats Work
+    Stats Work
+      Basic Stats Work:
       Column Stats Desc:
           Columns: sourceip, desturl, visitdate, adrevenue, useragent, ccode, lcode, skeyword, avgtimeonsite
           Column Types: string, string, string, float, string, string, string, string, int
@@ -732,10 +753,12 @@ STAGE PLANS:
 PREHOOK: query: analyze table dummydb.UserVisits_in_dummy_db compute statistics for columns
 PREHOOK: type: QUERY
 PREHOOK: Input: dummydb@uservisits_in_dummy_db
+PREHOOK: Output: dummydb@uservisits_in_dummy_db
 #### A masked pattern was here ####
 POSTHOOK: query: analyze table dummydb.UserVisits_in_dummy_db compute statistics for columns
 POSTHOOK: type: QUERY
 POSTHOOK: Input: dummydb@uservisits_in_dummy_db
+POSTHOOK: Output: dummydb@uservisits_in_dummy_db
 #### A masked pattern was here ####
 PREHOOK: query: describe formatted dummydb.UserVisits_in_dummy_db destURL
 PREHOOK: type: DESCTABLE
@@ -755,7 +778,7 @@ num_trues
 num_falses          	                    	 	 	 	 	 	 	 	 	 	 
 bitVector           	HL                  	 	 	 	 	 	 	 	 	 	 
 comment             	from deserializer   	 	 	 	 	 	 	 	 	 	 
-COLUMN_STATS_ACCURATE	{\"COLUMN_STATS\":{\"adrevenue\":\"true\",\"avgtimeonsite\":\"true\",\"ccode\":\"true\",\"desturl\":\"true\",\"lcode\":\"true\",\"skeyword\":\"true\",\"sourceip\":\"true\",\"useragent\":\"true\",\"visitdate\":\"true\"}}	 	 	 	 	 	 	 	 	 	 
+COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"adrevenue\":\"true\",\"avgtimeonsite\":\"true\",\"ccode\":\"true\",\"desturl\":\"true\",\"lcode\":\"true\",\"skeyword\":\"true\",\"sourceip\":\"true\",\"useragent\":\"true\",\"visitdate\":\"true\"}}	 	 	 	 	 	 	 	 	 	 
 PREHOOK: query: describe formatted dummydb.UserVisits_in_dummy_db adRevenue
 PREHOOK: type: DESCTABLE
 PREHOOK: Input: dummydb@uservisits_in_dummy_db
@@ -774,7 +797,7 @@ num_trues
 num_falses          	                    	 	 	 	 	 	 	 	 	 	 
 bitVector           	HL                  	 	 	 	 	 	 	 	 	 	 
 comment             	from deserializer   	 	 	 	 	 	 	 	 	 	 
-COLUMN_STATS_ACCURATE	{\"COLUMN_STATS\":{\"adrevenue\":\"true\",\"avgtimeonsite\":\"true\",\"ccode\":\"true\",\"desturl\":\"true\",\"lcode\":\"true\",\"skeyword\":\"true\",\"sourceip\":\"true\",\"useragent\":\"true\",\"visitdate\":\"true\"}}	 	 	 	 	 	 	 	 	 	 
+COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"adrevenue\":\"true\",\"avgtimeonsite\":\"true\",\"ccode\":\"true\",\"desturl\":\"true\",\"lcode\":\"true\",\"skeyword\":\"true\",\"sourceip\":\"true\",\"useragent\":\"true\",\"visitdate\":\"true\"}}	 	 	 	 	 	 	 	 	 	 
 PREHOOK: query: describe formatted dummydb.UserVisits_in_dummy_db avgTimeOnSite
 PREHOOK: type: DESCTABLE
 PREHOOK: Input: dummydb@uservisits_in_dummy_db
@@ -793,7 +816,7 @@ num_trues
 num_falses          	                    	 	 	 	 	 	 	 	 	 	 
 bitVector           	HL                  	 	 	 	 	 	 	 	 	 	 
 comment             	from deserializer   	 	 	 	 	 	 	 	 	 	 
-COLUMN_STATS_ACCURATE	{\"COLUMN_STATS\":{\"adrevenue\":\"true\",\"avgtimeonsite\":\"true\",\"ccode\":\"true\",\"desturl\":\"true\",\"lcode\":\"true\",\"skeyword\":\"true\",\"sourceip\":\"true\",\"useragent\":\"true\",\"visitdate\":\"true\"}}	 	 	 	 	 	 	 	 	 	 
+COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"adrevenue\":\"true\",\"avgtimeonsite\":\"true\",\"ccode\":\"true\",\"desturl\":\"true\",\"lcode\":\"true\",\"skeyword\":\"true\",\"sourceip\":\"true\",\"useragent\":\"true\",\"visitdate\":\"true\"}}	 	 	 	 	 	 	 	 	 	 
 PREHOOK: query: drop table dummydb.UserVisits_in_dummy_db
 PREHOOK: type: DROPTABLE
 PREHOOK: Input: dummydb@uservisits_in_dummy_db
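
The COLUMN_STATS_ACCURATE changes above are the visible effect of the merge: a column-stats-only ANALYZE now records basic stats as well, so the parameter carries "BASIC_STATS":"true" alongside the per-column flags. To check this on one of the tables above:

  analyze table UserVisits_web_text_none
    compute statistics for columns sourceIP, avgTimeOnSite, adRevenue;
  describe formatted UserVisits_web_text_none destURL;
  -- COLUMN_STATS_ACCURATE now starts with {"BASIC_STATS":"true",...}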

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/compustat_avro.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/compustat_avro.q.out b/ql/src/test/results/clientpositive/compustat_avro.q.out
index cdf9d36..f168cc5 100644
--- a/ql/src/test/results/clientpositive/compustat_avro.q.out
+++ b/ql/src/test/results/clientpositive/compustat_avro.q.out
@@ -46,10 +46,12 @@ COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"col1\":\"tru
 PREHOOK: query: analyze table testAvro compute statistics for columns col1,col3
 PREHOOK: type: QUERY
 PREHOOK: Input: default@testavro
+PREHOOK: Output: default@testavro
 #### A masked pattern was here ####
 POSTHOOK: query: analyze table testAvro compute statistics for columns col1,col3
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@testavro
+POSTHOOK: Output: default@testavro
 #### A masked pattern was here ####
 PREHOOK: query: describe formatted testAvro col1
 PREHOOK: type: DESCTABLE

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/compute_stats_date.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/compute_stats_date.q.out b/ql/src/test/results/clientpositive/compute_stats_date.q.out
index 1e6de61..796b3d3 100644
--- a/ql/src/test/results/clientpositive/compute_stats_date.q.out
+++ b/ql/src/test/results/clientpositive/compute_stats_date.q.out
@@ -89,7 +89,8 @@ STAGE PLANS:
                 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
 
   Stage: Stage-1
-    Column Stats Work
+    Stats Work
+      Basic Stats Work:
       Column Stats Desc:
           Columns: fl_date
           Column Types: date
@@ -98,10 +99,12 @@ STAGE PLANS:
 PREHOOK: query: analyze table tab_date compute statistics for columns fl_date
 PREHOOK: type: QUERY
 PREHOOK: Input: default@tab_date
+PREHOOK: Output: default@tab_date
 #### A masked pattern was here ####
 POSTHOOK: query: analyze table tab_date compute statistics for columns fl_date
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@tab_date
+POSTHOOK: Output: default@tab_date
 #### A masked pattern was here ####
 PREHOOK: query: describe formatted tab_date fl_date
 PREHOOK: type: DESCTABLE
@@ -121,7 +124,7 @@ num_trues
 num_falses          	                    	 	 	 	 	 	 	 	 	 	 
 bitVector           	HL                  	 	 	 	 	 	 	 	 	 	 
 comment             	from deserializer   	 	 	 	 	 	 	 	 	 	 
-COLUMN_STATS_ACCURATE	{\"COLUMN_STATS\":{\"fl_date\":\"true\"}}	 	 	 	 	 	 	 	 	 	 
+COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"fl_date\":\"true\"}}	 	 	 	 	 	 	 	 	 	 
 PREHOOK: query: alter table tab_date update statistics for column fl_date set ('numDVs'='19', 'highValue'='2015-01-01', 'lowValue'='0')
 PREHOOK: type: ALTERTABLE_UPDATETABLESTATS
 POSTHOOK: query: alter table tab_date update statistics for column fl_date set ('numDVs'='19', 'highValue'='2015-01-01', 'lowValue'='0')
@@ -144,4 +147,4 @@ num_trues
 num_falses          	                    	 	 	 	 	 	 	 	 	 	 
 bitVector           	HL                  	 	 	 	 	 	 	 	 	 	 
 comment             	from deserializer   	 	 	 	 	 	 	 	 	 	 
-COLUMN_STATS_ACCURATE	{\"COLUMN_STATS\":{\"fl_date\":\"true\"}}	 	 	 	 	 	 	 	 	 	 
+COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"fl_date\":\"true\"}}	 	 	 	 	 	 	 	 	 	 
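
compute_stats_date.q also covers manually overriding column statistics; note that after the patch the BASIC_STATS flag survives the update:

  -- from the golden file above
  alter table tab_date update statistics for column fl_date
    set ('numDVs'='19', 'highValue'='2015-01-01', 'lowValue'='0');
  describe formatted tab_date fl_date;
  -- COLUMN_STATS_ACCURATE still shows {"BASIC_STATS":"true",...}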

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/constGby.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/constGby.q.out b/ql/src/test/results/clientpositive/constGby.q.out
index 7115be3..c633624 100644
--- a/ql/src/test/results/clientpositive/constGby.q.out
+++ b/ql/src/test/results/clientpositive/constGby.q.out
@@ -17,10 +17,12 @@ POSTHOOK: Output: default@t1
 PREHOOK: query: analyze table t1 compute statistics for columns
 PREHOOK: type: QUERY
 PREHOOK: Input: default@t1
+PREHOOK: Output: default@t1
 #### A masked pattern was here ####
 POSTHOOK: query: analyze table t1 compute statistics for columns
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@t1
+POSTHOOK: Output: default@t1
 #### A masked pattern was here ####
 PREHOOK: query: explain select count(1) from t1 group by 1
 PREHOOK: type: QUERY

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/constant_prop_2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/constant_prop_2.q.out b/ql/src/test/results/clientpositive/constant_prop_2.q.out
index 77a9122..7c45245 100644
--- a/ql/src/test/results/clientpositive/constant_prop_2.q.out
+++ b/ql/src/test/results/clientpositive/constant_prop_2.q.out
@@ -74,7 +74,8 @@ STAGE PLANS:
                   serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
 
   Stage: Stage-1
-    Column Stats Work
+    Stats Work
+      Basic Stats Work:
       Column Stats Desc:
           Columns: key, value
           Column Types: string, string

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/constant_prop_3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/constant_prop_3.q.out b/ql/src/test/results/clientpositive/constant_prop_3.q.out
index 4b3de33..cba4744 100644
--- a/ql/src/test/results/clientpositive/constant_prop_3.q.out
+++ b/ql/src/test/results/clientpositive/constant_prop_3.q.out
@@ -51,10 +51,12 @@ POSTHOOK: Output: default@part_hive
 PREHOOK: query: analyze table part_hive compute statistics for columns
 PREHOOK: type: QUERY
 PREHOOK: Input: default@part_hive
+PREHOOK: Output: default@part_hive
 #### A masked pattern was here ####
 POSTHOOK: query: analyze table part_hive compute statistics for columns
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@part_hive
+POSTHOOK: Output: default@part_hive
 #### A masked pattern was here ####
 PREHOOK: query: analyze table partsupp_hive compute statistics
 PREHOOK: type: QUERY
@@ -67,10 +69,12 @@ POSTHOOK: Output: default@partsupp_hive
 PREHOOK: query: analyze table partsupp_hive compute statistics for columns
 PREHOOK: type: QUERY
 PREHOOK: Input: default@partsupp_hive
+PREHOOK: Output: default@partsupp_hive
 #### A masked pattern was here ####
 POSTHOOK: query: analyze table partsupp_hive compute statistics for columns
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@partsupp_hive
+POSTHOOK: Output: default@partsupp_hive
 #### A masked pattern was here ####
 PREHOOK: query: analyze table supplier_hive compute statistics
 PREHOOK: type: QUERY
@@ -83,10 +87,12 @@ POSTHOOK: Output: default@supplier_hive
 PREHOOK: query: analyze table supplier_hive compute statistics for columns
 PREHOOK: type: QUERY
 PREHOOK: Input: default@supplier_hive
+PREHOOK: Output: default@supplier_hive
 #### A masked pattern was here ####
 POSTHOOK: query: analyze table supplier_hive compute statistics for columns
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@supplier_hive
+POSTHOOK: Output: default@supplier_hive
 #### A masked pattern was here ####
 Warning: Shuffle Join JOIN[25][tables = [$hdt$_0, $hdt$_1, $hdt$_2]] in Stage 'Stage-2:MAPRED' is a cross product
 PREHOOK: query: explain select

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/constprog_dp.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/constprog_dp.q.out b/ql/src/test/results/clientpositive/constprog_dp.q.out
index 8cf301d..86b6c32 100644
--- a/ql/src/test/results/clientpositive/constprog_dp.q.out
+++ b/ql/src/test/results/clientpositive/constprog_dp.q.out
@@ -66,7 +66,8 @@ STAGE PLANS:
               name: default.dest
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-3
     Map Reduce

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/constprog_type.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/constprog_type.q.out b/ql/src/test/results/clientpositive/constprog_type.q.out
index d145d37..27ef1f4 100644
--- a/ql/src/test/results/clientpositive/constprog_type.q.out
+++ b/ql/src/test/results/clientpositive/constprog_type.q.out
@@ -67,7 +67,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-3
     Map Reduce
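
For write queries the old "Stats-Aggr Operator" stage is likewise printed as "Stats Work" with a "Basic Stats Work:" section, as the constprog_dp and constprog_type hunks show. A hypothetical plan request against the dest1 table created by constprog_type.q (the test's actual query is not shown in this hunk, and dest1's schema is assumed to match src's two string columns):

  -- illustration only; table schemas are assumptions
  explain insert overwrite table dest1 select key, value from src;
  -- the final stage now prints "Stats Work" / "Basic Stats Work:"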

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/correlated_join_keys.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/correlated_join_keys.q.out b/ql/src/test/results/clientpositive/correlated_join_keys.q.out
index b81a561..aaea845 100644
--- a/ql/src/test/results/clientpositive/correlated_join_keys.q.out
+++ b/ql/src/test/results/clientpositive/correlated_join_keys.q.out
@@ -61,10 +61,12 @@ POSTHOOK: Output: default@customer_address
 PREHOOK: query: analyze table customer_address compute statistics for columns ca_state, ca_zip
 PREHOOK: type: QUERY
 PREHOOK: Input: default@customer_address
+PREHOOK: Output: default@customer_address
 #### A masked pattern was here ####
 POSTHOOK: query: analyze table customer_address compute statistics for columns ca_state, ca_zip
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@customer_address
+POSTHOOK: Output: default@customer_address
 #### A masked pattern was here ####
 PREHOOK: query: explain select count(*) from customer_address a join customer_address b on (a.ca_zip = b.ca_zip and a.ca_state = b.ca_state)
 PREHOOK: type: QUERY

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/correlationoptimizer5.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/correlationoptimizer5.q.out b/ql/src/test/results/clientpositive/correlationoptimizer5.q.out
index 8c2e6cf..68d6a54 100644
--- a/ql/src/test/results/clientpositive/correlationoptimizer5.q.out
+++ b/ql/src/test/results/clientpositive/correlationoptimizer5.q.out
@@ -215,7 +215,8 @@ STAGE PLANS:
               name: default.dest_co1
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-5
     Map Reduce
@@ -461,7 +462,8 @@ STAGE PLANS:
               name: default.dest_co2
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: INSERT OVERWRITE TABLE dest_co2
 SELECT b.key, d.val
@@ -636,7 +638,8 @@ STAGE PLANS:
               name: default.dest_co3
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-13
     Map Reduce Local Work

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/cp_sel.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/cp_sel.q.out b/ql/src/test/results/clientpositive/cp_sel.q.out
index af2efeb..9100037 100644
--- a/ql/src/test/results/clientpositive/cp_sel.q.out
+++ b/ql/src/test/results/clientpositive/cp_sel.q.out
@@ -128,7 +128,8 @@ STAGE PLANS:
               name: default.testpartbucket
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: insert overwrite table testpartbucket partition(ds,hr) select key,value,'hello' as ds, 'world' as hr from srcpart where hr=11
 PREHOOK: type: QUERY
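
The same rename shows up in dynamic-partition inserts; cp_sel.q's statement, for reference:

  insert overwrite table testpartbucket partition(ds,hr)
    select key, value, 'hello' as ds, 'world' as hr from srcpart where hr=11;
  -- Stage-2 of its plan is now "Stats Work" / "Basic Stats Work:"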

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/ctas.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/ctas.q.out b/ql/src/test/results/clientpositive/ctas.q.out
index 1764c6b..e659650 100644
--- a/ql/src/test/results/clientpositive/ctas.q.out
+++ b/ql/src/test/results/clientpositive/ctas.q.out
@@ -98,7 +98,8 @@ STAGE PLANS:
           name: default.nzhang_CTAS1
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: create table nzhang_CTAS1 as select key k, value from src sort by k, value limit 10
 PREHOOK: type: CREATETABLE_AS_SELECT
@@ -248,7 +249,8 @@ STAGE PLANS:
           name: default.nzhang_ctas2
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: create table nzhang_ctas2 as select * from src sort by key, value limit 10
 PREHOOK: type: CREATETABLE_AS_SELECT
@@ -398,7 +400,8 @@ STAGE PLANS:
           name: default.nzhang_ctas3
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: create table nzhang_ctas3 row format serde "org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe" stored as RCFile as select key/2 half_key, concat(value, "_con") conb  from src sort by half_key, conb limit 10
 PREHOOK: type: CREATETABLE_AS_SELECT
@@ -612,7 +615,8 @@ STAGE PLANS:
           name: default.nzhang_ctas4
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: create table nzhang_ctas4 row format delimited fields terminated by ',' stored as textfile as select key, value from src sort by key, value limit 10
 PREHOOK: type: CREATETABLE_AS_SELECT
@@ -766,7 +770,8 @@ STAGE PLANS:
           name: default.nzhang_ctas5
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: create table nzhang_ctas5 row format delimited fields terminated by ',' lines terminated by '\012' stored as textfile as select key, value from src sort by key, value limit 10
 PREHOOK: type: CREATETABLE_AS_SELECT
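
CTAS plans get the same treatment; each nzhang_ctas* statement above now ends in a "Stats Work" stage, e.g.:

  create table nzhang_CTAS1 as
    select key k, value from src sort by k, value limit 10;
  -- Stage-3 of the CTAS plan prints "Stats Work" / "Basic Stats Work:"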


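The next golden file, autoColumnStats_2.q.out, is new in this patch and covers automatic column statistics gathering on writes. A minimal sketch of what it exercises, assuming hive.stats.column.autogather (the switch for this behavior) is enabled by the test setup, which is not shown here:

  set hive.stats.column.autogather=true;  -- assumption: enabled by the test
  create table src_multi1 like src;
  insert into table src_multi1 select * from src;
  describe formatted src_multi1;
  -- COLUMN_STATS_ACCURATE shows both BASIC_STATS and per-column flags
  -- without an explicit ANALYZE
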
http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/autoColumnStats_2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/autoColumnStats_2.q.out b/ql/src/test/results/clientpositive/autoColumnStats_2.q.out
new file mode 100644
index 0000000..b209ff0
--- /dev/null
+++ b/ql/src/test/results/clientpositive/autoColumnStats_2.q.out
@@ -0,0 +1,1557 @@
+PREHOOK: query: drop table src_multi1
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: drop table src_multi1
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: create table src_multi1 like src
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@src_multi1
+POSTHOOK: query: create table src_multi1 like src
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@src_multi1
+PREHOOK: query: insert into table src_multi1 select * from src
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@src_multi1
+POSTHOOK: query: insert into table src_multi1 select * from src
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@src_multi1
+POSTHOOK: Lineage: src_multi1.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: src_multi1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: explain extended select * from src_multi1
+PREHOOK: type: QUERY
+POSTHOOK: query: explain extended select * from src_multi1
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        TableScan
+          alias: src_multi1
+          Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+          GatherStats: false
+          Select Operator
+            expressions: key (type: string), value (type: string)
+            outputColumnNames: _col0, _col1
+            Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+            ListSink
+
+PREHOOK: query: describe formatted src_multi1
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@src_multi1
+POSTHOOK: query: describe formatted src_multi1
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@src_multi1
+# col_name            	data_type           	comment             
+key                 	string              	default             
+value               	string              	default             
+	 	 
+# Detailed Table Information	 	 
+Database:           	default             	 
+#### A masked pattern was here ####
+Retention:          	0                   	 
+#### A masked pattern was here ####
+Table Type:         	MANAGED_TABLE       	 
+Table Parameters:	 	 
+	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}}
+	numFiles            	1                   
+	numRows             	500                 
+	rawDataSize         	5312                
+	totalSize           	5812                
+#### A masked pattern was here ####
+	 	 
+# Storage Information	 	 
+SerDe Library:      	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe	 
+InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
+OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
+Compressed:         	No                  	 
+Num Buckets:        	-1                  	 
+Bucket Columns:     	[]                  	 
+Sort Columns:       	[]                  	 
+Storage Desc Params:	 	 
+	serialization.format	1                   
+PREHOOK: query: drop table a
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: drop table a
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: drop table b
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: drop table b
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: create table a like src
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@a
+POSTHOOK: query: create table a like src
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@a
+PREHOOK: query: create table b like src
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@b
+POSTHOOK: query: create table b like src
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@b
+PREHOOK: query: from src
+insert into table a select *
+insert into table b select *
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@a
+PREHOOK: Output: default@b
+POSTHOOK: query: from src
+insert into table a select *
+insert into table b select *
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@a
+POSTHOOK: Output: default@b
+POSTHOOK: Lineage: a.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: a.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: b.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: b.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: describe formatted a key
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@a
+POSTHOOK: query: describe formatted a key
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@a
+col_name            	key                 	 	 	 	 	 	 	 	 	 	 
+data_type           	string              	 	 	 	 	 	 	 	 	 	 
+min                 	                    	 	 	 	 	 	 	 	 	 	 
+max                 	                    	 	 	 	 	 	 	 	 	 	 
+num_nulls           	0                   	 	 	 	 	 	 	 	 	 	 
+distinct_count      	309                 	 	 	 	 	 	 	 	 	 	 
+avg_col_len         	2.812               	 	 	 	 	 	 	 	 	 	 
+max_col_len         	3                   	 	 	 	 	 	 	 	 	 	 
+num_trues           	                    	 	 	 	 	 	 	 	 	 	 
+num_falses          	                    	 	 	 	 	 	 	 	 	 	 
+bitVector           	HL                  	 	 	 	 	 	 	 	 	 	 
+comment             	from deserializer   	 	 	 	 	 	 	 	 	 	 
+COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}}	 	 	 	 	 	 	 	 	 	 
+PREHOOK: query: describe formatted b key
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@b
+POSTHOOK: query: describe formatted b key
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@b
+col_name            	key                 	 	 	 	 	 	 	 	 	 	 
+data_type           	string              	 	 	 	 	 	 	 	 	 	 
+min                 	                    	 	 	 	 	 	 	 	 	 	 
+max                 	                    	 	 	 	 	 	 	 	 	 	 
+num_nulls           	0                   	 	 	 	 	 	 	 	 	 	 
+distinct_count      	309                 	 	 	 	 	 	 	 	 	 	 
+avg_col_len         	2.812               	 	 	 	 	 	 	 	 	 	 
+max_col_len         	3                   	 	 	 	 	 	 	 	 	 	 
+num_trues           	                    	 	 	 	 	 	 	 	 	 	 
+num_falses          	                    	 	 	 	 	 	 	 	 	 	 
+bitVector           	HL                  	 	 	 	 	 	 	 	 	 	 
+comment             	from deserializer   	 	 	 	 	 	 	 	 	 	 
+COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}}	 	 	 	 	 	 	 	 	 	 
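
The bitVector value HL marks the NDV estimate as a HyperLogLog sketch, so distinct_count (309 for both tables) is an estimate rather than an exact count; because such sketches can be merged, later appends can update the estimate without rescanning existing data. That reading is inferred from the output, not asserted by the test. Recomputing from the data is always possible:

    -- recompute column stats for b directly from the data
    ANALYZE TABLE b COMPUTE STATISTICS FOR COLUMNS;
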
+PREHOOK: query: from src
+insert overwrite table a select *
+insert into table b select *
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@a
+PREHOOK: Output: default@b
+POSTHOOK: query: from src
+insert overwrite table a select *
+insert into table b select *
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@a
+POSTHOOK: Output: default@b
+POSTHOOK: Lineage: a.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: a.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: b.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: b.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: describe formatted a
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@a
+POSTHOOK: query: describe formatted a
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@a
+# col_name            	data_type           	comment             
+key                 	string              	default             
+value               	string              	default             
+	 	 
+# Detailed Table Information	 	 
+Database:           	default             	 
+#### A masked pattern was here ####
+Retention:          	0                   	 
+#### A masked pattern was here ####
+Table Type:         	MANAGED_TABLE       	 
+Table Parameters:	 	 
+	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}}
+	numFiles            	1                   
+	numRows             	500                 
+	rawDataSize         	5312                
+	totalSize           	5812                
+#### A masked pattern was here ####
+	 	 
+# Storage Information	 	 
+SerDe Library:      	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe	 
+InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
+OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
+Compressed:         	No                  	 
+Num Buckets:        	-1                  	 
+Bucket Columns:     	[]                  	 
+Sort Columns:       	[]                  	 
+Storage Desc Params:	 	 
+	serialization.format	1                   
+PREHOOK: query: describe formatted b
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@b
+POSTHOOK: query: describe formatted b
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@b
+# col_name            	data_type           	comment             
+key                 	string              	default             
+value               	string              	default             
+	 	 
+# Detailed Table Information	 	 
+Database:           	default             	 
+#### A masked pattern was here ####
+Retention:          	0                   	 
+#### A masked pattern was here ####
+Table Type:         	MANAGED_TABLE       	 
+Table Parameters:	 	 
+	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}}
+	numFiles            	2                   
+	numRows             	1000                
+	rawDataSize         	10624               
+	totalSize           	11624               
+#### A masked pattern was here ####
+	 	 
+# Storage Information	 	 
+SerDe Library:      	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe	 
+InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
+OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
+Compressed:         	No                  	 
+Num Buckets:        	-1                  	 
+Bucket Columns:     	[]                  	 
+Sort Columns:       	[]                  	 
+Storage Desc Params:	 	 
+	serialization.format	1                   
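
The two DESCRIBE outputs above contrast the write modes: INSERT OVERWRITE replaced table a's data and statistics wholesale (still 1 file, 500 rows), while INSERT INTO appended to b and merged the increments additively. The merge, using the values from the output:

    -- additive stats merge for INSERT INTO table b
    -- numFiles:    1    + 1    = 2
    -- numRows:     500  + 500  = 1000
    -- rawDataSize: 5312 + 5312 = 10624
    -- totalSize:   5812 + 5812 = 11624
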
+PREHOOK: query: describe formatted b key
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@b
+POSTHOOK: query: describe formatted b key
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@b
+col_name            	key                 	 	 	 	 	 	 	 	 	 	 
+data_type           	string              	 	 	 	 	 	 	 	 	 	 
+min                 	                    	 	 	 	 	 	 	 	 	 	 
+max                 	                    	 	 	 	 	 	 	 	 	 	 
+num_nulls           	0                   	 	 	 	 	 	 	 	 	 	 
+distinct_count      	309                 	 	 	 	 	 	 	 	 	 	 
+avg_col_len         	2.812               	 	 	 	 	 	 	 	 	 	 
+max_col_len         	3                   	 	 	 	 	 	 	 	 	 	 
+num_trues           	                    	 	 	 	 	 	 	 	 	 	 
+num_falses          	                    	 	 	 	 	 	 	 	 	 	 
+bitVector           	HL                  	 	 	 	 	 	 	 	 	 	 
+comment             	from deserializer   	 	 	 	 	 	 	 	 	 	 
+COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}}	 	 	 	 	 	 	 	 	 	 
+PREHOOK: query: describe formatted b value
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@b
+POSTHOOK: query: describe formatted b value
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@b
+col_name            	value               	 	 	 	 	 	 	 	 	 	 
+data_type           	string              	 	 	 	 	 	 	 	 	 	 
+min                 	                    	 	 	 	 	 	 	 	 	 	 
+max                 	                    	 	 	 	 	 	 	 	 	 	 
+num_nulls           	0                   	 	 	 	 	 	 	 	 	 	 
+distinct_count      	309                 	 	 	 	 	 	 	 	 	 	 
+avg_col_len         	6.812               	 	 	 	 	 	 	 	 	 	 
+max_col_len         	7                   	 	 	 	 	 	 	 	 	 	 
+num_trues           	                    	 	 	 	 	 	 	 	 	 	 
+num_falses          	                    	 	 	 	 	 	 	 	 	 	 
+bitVector           	HL                  	 	 	 	 	 	 	 	 	 	 
+comment             	from deserializer   	 	 	 	 	 	 	 	 	 	 
+COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}}	 	 	 	 	 	 	 	 	 	 
+PREHOOK: query: insert into table b select NULL, NULL from src limit 10
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@b
+POSTHOOK: query: insert into table b select NULL, NULL from src limit 10
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@b
+POSTHOOK: Lineage: b.key EXPRESSION []
+POSTHOOK: Lineage: b.value EXPRESSION []
+PREHOOK: query: describe formatted b key
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@b
+POSTHOOK: query: describe formatted b key
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@b
+col_name            	key                 	 	 	 	 	 	 	 	 	 	 
+data_type           	string              	 	 	 	 	 	 	 	 	 	 
+min                 	                    	 	 	 	 	 	 	 	 	 	 
+max                 	                    	 	 	 	 	 	 	 	 	 	 
+num_nulls           	10                  	 	 	 	 	 	 	 	 	 	 
+distinct_count      	309                 	 	 	 	 	 	 	 	 	 	 
+avg_col_len         	2.812               	 	 	 	 	 	 	 	 	 	 
+max_col_len         	3                   	 	 	 	 	 	 	 	 	 	 
+num_trues           	                    	 	 	 	 	 	 	 	 	 	 
+num_falses          	                    	 	 	 	 	 	 	 	 	 	 
+bitVector           	HL                  	 	 	 	 	 	 	 	 	 	 
+comment             	from deserializer   	 	 	 	 	 	 	 	 	 	 
+COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}}	 	 	 	 	 	 	 	 	 	 
+PREHOOK: query: describe formatted b value
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@b
+POSTHOOK: query: describe formatted b value
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@b
+col_name            	value               	 	 	 	 	 	 	 	 	 	 
+data_type           	string              	 	 	 	 	 	 	 	 	 	 
+min                 	                    	 	 	 	 	 	 	 	 	 	 
+max                 	                    	 	 	 	 	 	 	 	 	 	 
+num_nulls           	10                  	 	 	 	 	 	 	 	 	 	 
+distinct_count      	309                 	 	 	 	 	 	 	 	 	 	 
+avg_col_len         	6.812               	 	 	 	 	 	 	 	 	 	 
+max_col_len         	7                   	 	 	 	 	 	 	 	 	 	 
+num_trues           	                    	 	 	 	 	 	 	 	 	 	 
+num_falses          	                    	 	 	 	 	 	 	 	 	 	 
+bitVector           	HL                  	 	 	 	 	 	 	 	 	 	 
+comment             	from deserializer   	 	 	 	 	 	 	 	 	 	 
+COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}}	 	 	 	 	 	 	 	 	 	 
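
Appending ten all-NULL rows updated the column stats incrementally: num_nulls rose from 0 to 10 on both columns, while distinct_count and the length stats are unchanged, since NULLs contribute no new values or lengths. The step, as the test runs it:

    INSERT INTO TABLE b SELECT NULL, NULL FROM src LIMIT 10;
    DESCRIBE FORMATTED b key;    -- expect num_nulls = 10
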
+PREHOOK: query: insert into table b(value) select key+100000 from src limit 10
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@b
+POSTHOOK: query: insert into table b(value) select key+100000 from src limit 10
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@b
+POSTHOOK: Lineage: b.key SIMPLE []
+POSTHOOK: Lineage: b.value EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+PREHOOK: query: describe formatted b key
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@b
+POSTHOOK: query: describe formatted b key
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@b
+col_name            	key                 	 	 	 	 	 	 	 	 	 	 
+data_type           	string              	 	 	 	 	 	 	 	 	 	 
+min                 	                    	 	 	 	 	 	 	 	 	 	 
+max                 	                    	 	 	 	 	 	 	 	 	 	 
+num_nulls           	20                  	 	 	 	 	 	 	 	 	 	 
+distinct_count      	309                 	 	 	 	 	 	 	 	 	 	 
+avg_col_len         	2.812               	 	 	 	 	 	 	 	 	 	 
+max_col_len         	3                   	 	 	 	 	 	 	 	 	 	 
+num_trues           	                    	 	 	 	 	 	 	 	 	 	 
+num_falses          	                    	 	 	 	 	 	 	 	 	 	 
+bitVector           	HL                  	 	 	 	 	 	 	 	 	 	 
+comment             	from deserializer   	 	 	 	 	 	 	 	 	 	 
+COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}}	 	 	 	 	 	 	 	 	 	 
+PREHOOK: query: describe formatted b value
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@b
+POSTHOOK: query: describe formatted b value
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@b
+col_name            	value               	 	 	 	 	 	 	 	 	 	 
+data_type           	string              	 	 	 	 	 	 	 	 	 	 
+min                 	                    	 	 	 	 	 	 	 	 	 	 
+max                 	                    	 	 	 	 	 	 	 	 	 	 
+num_nulls           	10                  	 	 	 	 	 	 	 	 	 	 
+distinct_count      	319                 	 	 	 	 	 	 	 	 	 	 
+avg_col_len         	8.0                 	 	 	 	 	 	 	 	 	 	 
+max_col_len         	8                   	 	 	 	 	 	 	 	 	 	 
+num_trues           	                    	 	 	 	 	 	 	 	 	 	 
+num_falses          	                    	 	 	 	 	 	 	 	 	 	 
+bitVector           	HL                  	 	 	 	 	 	 	 	 	 	 
+comment             	from deserializer   	 	 	 	 	 	 	 	 	 	 
+COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}}	 	 	 	 	 	 	 	 	 	 
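
This insert names only the value column, so key is filled with NULL for the ten new rows (num_nulls 20 = 10 from the earlier insert + 10 here), while value gains ten fresh eight-character strings such as '100238.0' (key+100000 coerces the string key to a double), pushing distinct_count from 309 to 319 and max_col_len to 8. The merged avg_col_len of 8.0 suggests the merge keeps the larger of the incoming averages, a conservative choice; that is an inference from the numbers, not documented behavior. In short:

    -- key:   num_nulls      10  + 10 = 20    (only NULLs were written)
    -- value: distinct_count 309 + 10 = 319   (ten new unique strings)
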
+PREHOOK: query: drop table src_multi2
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: drop table src_multi2
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: create table src_multi2 like src
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@src_multi2
+POSTHOOK: query: create table src_multi2 like src
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@src_multi2
+PREHOOK: query: insert into table src_multi2 select subq.key, src.value from (select * from src union select * from src1)subq join src on subq.key=src.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Input: default@src1
+PREHOOK: Output: default@src_multi2
+POSTHOOK: query: insert into table src_multi2 select subq.key, src.value from (select * from src union select * from src1)subq join src on subq.key=src.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Input: default@src1
+POSTHOOK: Output: default@src_multi2
+POSTHOOK: Lineage: src_multi2.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), (src1)src1.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: src_multi2.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: describe formatted src_multi2
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@src_multi2
+POSTHOOK: query: describe formatted src_multi2
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@src_multi2
+# col_name            	data_type           	comment             
+key                 	string              	default             
+value               	string              	default             
+	 	 
+# Detailed Table Information	 	 
+Database:           	default             	 
+#### A masked pattern was here ####
+Retention:          	0                   	 
+#### A masked pattern was here ####
+Table Type:         	MANAGED_TABLE       	 
+Table Parameters:	 	 
+	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}}
+	numFiles            	1                   
+	numRows             	508                 
+	rawDataSize         	5400                
+	totalSize           	5908                
+#### A masked pattern was here ####
+	 	 
+# Storage Information	 	 
+SerDe Library:      	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe	 
+InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
+OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
+Compressed:         	No                  	 
+Num Buckets:        	-1                  	 
+Bucket Columns:     	[]                  	 
+Sort Columns:       	[]                  	 
+Storage Desc Params:	 	 
+	serialization.format	1                   
+PREHOOK: query: drop table nzhang_part14
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: drop table nzhang_part14
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: create table if not exists nzhang_part14 (key string)
+  partitioned by (value string)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@nzhang_part14
+POSTHOOK: query: create table if not exists nzhang_part14 (key string)
+  partitioned by (value string)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@nzhang_part14
+PREHOOK: query: insert into table nzhang_part14 partition(value) 
+select key, value from (
+  select * from (select 'k1' as key, cast(null as string) as value from src limit 2)a 
+  union all
+  select * from (select 'k2' as key, '' as value from src limit 2)b
+  union all 
+  select * from (select 'k3' as key, ' ' as value from src limit 2)c
+) T
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@nzhang_part14
+POSTHOOK: query: insert into table nzhang_part14 partition(value) 
+select key, value from (
+  select * from (select 'k1' as key, cast(null as string) as value from src limit 2)a 
+  union all
+  select * from (select 'k2' as key, '' as value from src limit 2)b
+  union all 
+  select * from (select 'k3' as key, ' ' as value from src limit 2)c
+) T
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@nzhang_part14@value= 
+POSTHOOK: Output: default@nzhang_part14@value=__HIVE_DEFAULT_PARTITION__
+POSTHOOK: Lineage: nzhang_part14 PARTITION(value= ).key EXPRESSION []
+POSTHOOK: Lineage: nzhang_part14 PARTITION(value=__HIVE_DEFAULT_PARTITION__).key EXPRESSION []
+PREHOOK: query: explain select key from nzhang_part14
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select key from nzhang_part14
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        TableScan
+          alias: nzhang_part14
+          Statistics: Num rows: 6 Data size: 516 Basic stats: COMPLETE Column stats: COMPLETE
+          Select Operator
+            expressions: key (type: string)
+            outputColumnNames: _col0
+            Statistics: Num rows: 6 Data size: 516 Basic stats: COMPLETE Column stats: COMPLETE
+            ListSink
+
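In the dynamic-partition insert above, both the NULL and the empty-string values land in __HIVE_DEFAULT_PARTITION__, while ' ' (a single space) gets its own partition; the plan still reports all 6 rows with COMPLETE column stats because each written partition carried its statistics. A run like this presumably needs dynamic partitioning enabled (assumed settings, not shown in the test):

    SET hive.exec.dynamic.partition=true;
    SET hive.exec.dynamic.partition.mode=nonstrict;  -- no leading static partition
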
+PREHOOK: query: drop table src5
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: drop table src5
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: create table src5 as select key, value from src limit 5
+PREHOOK: type: CREATETABLE_AS_SELECT
+PREHOOK: Input: default@src
+PREHOOK: Output: database:default
+PREHOOK: Output: default@src5
+POSTHOOK: query: create table src5 as select key, value from src limit 5
+POSTHOOK: type: CREATETABLE_AS_SELECT
+POSTHOOK: Input: default@src
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@src5
+POSTHOOK: Lineage: src5.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: src5.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: insert into table nzhang_part14 partition(value)
+select key, value from src5
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src5
+PREHOOK: Output: default@nzhang_part14
+POSTHOOK: query: insert into table nzhang_part14 partition(value)
+select key, value from src5
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src5
+POSTHOOK: Output: default@nzhang_part14@value=val_165
+POSTHOOK: Output: default@nzhang_part14@value=val_238
+POSTHOOK: Output: default@nzhang_part14@value=val_27
+POSTHOOK: Output: default@nzhang_part14@value=val_311
+POSTHOOK: Output: default@nzhang_part14@value=val_86
+POSTHOOK: Lineage: nzhang_part14 PARTITION(value=val_165).key SIMPLE [(src5)src5.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: nzhang_part14 PARTITION(value=val_238).key SIMPLE [(src5)src5.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: nzhang_part14 PARTITION(value=val_27).key SIMPLE [(src5)src5.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: nzhang_part14 PARTITION(value=val_311).key SIMPLE [(src5)src5.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: nzhang_part14 PARTITION(value=val_86).key SIMPLE [(src5)src5.FieldSchema(name:key, type:string, comment:null), ]
+PREHOOK: query: explain select key from nzhang_part14
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select key from nzhang_part14
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        TableScan
+          alias: nzhang_part14
+          Statistics: Num rows: 11 Data size: 946 Basic stats: COMPLETE Column stats: PARTIAL
+          Select Operator
+            expressions: key (type: string)
+            outputColumnNames: _col0
+            Statistics: Num rows: 11 Data size: 946 Basic stats: COMPLETE Column stats: PARTIAL
+            ListSink
+
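After the second insert the row estimate is the sum across partitions (6 + 5 = 11), but column stats have dropped from COMPLETE to PARTIAL. One plausible reading is that the aggregated key statistics no longer cover every partition uniformly, so the planner claims only partial column-level accuracy; re-analyzing all partitions should restore COMPLETE (illustrative, not part of the test):

    ANALYZE TABLE nzhang_part14 PARTITION (value) COMPUTE STATISTICS FOR COLUMNS;
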
+PREHOOK: query: drop table alter5
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: drop table alter5
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: create table alter5 ( col1 string ) partitioned by (dt string)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@alter5
+POSTHOOK: query: create table alter5 ( col1 string ) partitioned by (dt string)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@alter5
+PREHOOK: query: alter table alter5 add partition (dt='a')
+PREHOOK: type: ALTERTABLE_ADDPARTS
+PREHOOK: Output: default@alter5
+POSTHOOK: query: alter table alter5 add partition (dt='a')
+POSTHOOK: type: ALTERTABLE_ADDPARTS
+POSTHOOK: Output: default@alter5
+POSTHOOK: Output: default@alter5@dt=a
+PREHOOK: query: describe formatted alter5 partition (dt='a')
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@alter5
+POSTHOOK: query: describe formatted alter5 partition (dt='a')
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@alter5
+# col_name            	data_type           	comment             
+col1                	string              	                    
+	 	 
+# Partition Information	 	 
+# col_name            	data_type           	comment             
+dt                  	string              	                    
+	 	 
+# Detailed Partition Information	 	 
+Partition Value:    	[a]                 	 
+Database:           	default             	 
+Table:              	alter5              	 
+#### A masked pattern was here ####
+Partition Parameters:	 	 
+	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"col1\":\"true\"}}
+	numFiles            	0                   
+	numRows             	0                   
+	rawDataSize         	0                   
+	totalSize           	0                   
+#### A masked pattern was here ####
+	 	 
+# Storage Information	 	 
+SerDe Library:      	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe	 
+InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
+OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
+Compressed:         	No                  	 
+Num Buckets:        	-1                  	 
+Bucket Columns:     	[]                  	 
+Sort Columns:       	[]                  	 
+Storage Desc Params:	 	 
+	serialization.format	1                   
+PREHOOK: query: insert into table alter5 partition (dt='a') select key from src
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@alter5@dt=a
+POSTHOOK: query: insert into table alter5 partition (dt='a') select key from src
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@alter5@dt=a
+POSTHOOK: Lineage: alter5 PARTITION(dt=a).col1 SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+PREHOOK: query: describe formatted alter5 partition (dt='a')
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@alter5
+POSTHOOK: query: describe formatted alter5 partition (dt='a')
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@alter5
+# col_name            	data_type           	comment             
+col1                	string              	                    
+	 	 
+# Partition Information	 	 
+# col_name            	data_type           	comment             
+dt                  	string              	                    
+	 	 
+# Detailed Partition Information	 	 
+Partition Value:    	[a]                 	 
+Database:           	default             	 
+Table:              	alter5              	 
+#### A masked pattern was here ####
+Partition Parameters:	 	 
+	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"col1\":\"true\"}}
+	numFiles            	1                   
+	numRows             	500                 
+	rawDataSize         	1406                
+	totalSize           	1906                
+#### A masked pattern was here ####
+	 	 
+# Storage Information	 	 
+SerDe Library:      	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe	 
+InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
+OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
+Compressed:         	No                  	 
+Num Buckets:        	-1                  	 
+Bucket Columns:     	[]                  	 
+Sort Columns:       	[]                  	 
+Storage Desc Params:	 	 
+	serialization.format	1                   
+PREHOOK: query: explain select * from alter5 where dt='a'
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select * from alter5 where dt='a'
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        TableScan
+          alias: alter5
+          Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
+          Select Operator
+            expressions: col1 (type: string), 'a' (type: string)
+            outputColumnNames: _col0, _col1
+            Statistics: Num rows: 500 Data size: 86000 Basic stats: COMPLETE Column stats: COMPLETE
+            ListSink
+
+PREHOOK: query: drop table alter5
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@alter5
+PREHOOK: Output: default@alter5
+POSTHOOK: query: drop table alter5
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@alter5
+POSTHOOK: Output: default@alter5
+PREHOOK: query: create table alter5 ( col1 string ) partitioned by (dt string)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@alter5
+POSTHOOK: query: create table alter5 ( col1 string ) partitioned by (dt string)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@alter5
+PREHOOK: query: alter table alter5 add partition (dt='a') location 'parta'
+PREHOOK: type: ALTERTABLE_ADDPARTS
+#### A masked pattern was here ####
+PREHOOK: Output: default@alter5
+POSTHOOK: query: alter table alter5 add partition (dt='a') location 'parta'
+POSTHOOK: type: ALTERTABLE_ADDPARTS
+#### A masked pattern was here ####
+POSTHOOK: Output: default@alter5
+POSTHOOK: Output: default@alter5@dt=a
+PREHOOK: query: describe formatted alter5 partition (dt='a')
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@alter5
+POSTHOOK: query: describe formatted alter5 partition (dt='a')
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@alter5
+# col_name            	data_type           	comment             
+col1                	string              	                    
+	 	 
+# Partition Information	 	 
+# col_name            	data_type           	comment             
+dt                  	string              	                    
+	 	 
+# Detailed Partition Information	 	 
+Partition Value:    	[a]                 	 
+Database:           	default             	 
+Table:              	alter5              	 
+#### A masked pattern was here ####
+Partition Parameters:	 	 
+#### A masked pattern was here ####
+	 	 
+# Storage Information	 	 
+SerDe Library:      	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe	 
+InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
+OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
+Compressed:         	No                  	 
+Num Buckets:        	-1                  	 
+Bucket Columns:     	[]                  	 
+Sort Columns:       	[]                  	 
+Storage Desc Params:	 	 
+	serialization.format	1                   
+PREHOOK: query: insert into table alter5 partition (dt='a') select key from src
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@alter5@dt=a
+POSTHOOK: query: insert into table alter5 partition (dt='a') select key from src
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@alter5@dt=a
+POSTHOOK: Lineage: alter5 PARTITION(dt=a).col1 SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+PREHOOK: query: describe formatted alter5 partition (dt='a')
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@alter5
+POSTHOOK: query: describe formatted alter5 partition (dt='a')
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@alter5
+# col_name            	data_type           	comment             
+col1                	string              	                    
+	 	 
+# Partition Information	 	 
+# col_name            	data_type           	comment             
+dt                  	string              	                    
+	 	 
+# Detailed Partition Information	 	 
+Partition Value:    	[a]                 	 
+Database:           	default             	 
+Table:              	alter5              	 
+#### A masked pattern was here ####
+Partition Parameters:	 	 
+	numFiles            	1                   
+	totalSize           	1906                
+#### A masked pattern was here ####
+	 	 
+# Storage Information	 	 
+SerDe Library:      	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe	 
+InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
+OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
+Compressed:         	No                  	 
+Num Buckets:        	-1                  	 
+Bucket Columns:     	[]                  	 
+Sort Columns:       	[]                  	 
+Storage Desc Params:	 	 
+	serialization.format	1                   
+PREHOOK: query: explain select * from alter5 where dt='a'
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select * from alter5 where dt='a'
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        TableScan
+          alias: alter5
+          Statistics: Num rows: 19 Data size: 1653 Basic stats: COMPLETE Column stats: COMPLETE
+          Select Operator
+            expressions: col1 (type: string), 'a' (type: string)
+            outputColumnNames: _col0, _col1
+            Statistics: Num rows: 19 Data size: 3268 Basic stats: COMPLETE Column stats: COMPLETE
+            ListSink
+
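Because this alter5 partition was added with an explicit LOCATION, no statistics were seeded at creation, and the insert recorded only file-level facts (numFiles, totalSize) rather than numRows or column stats. The plan therefore falls back to a size-based guess of 19 rows, derived from totalSize and an assumed average row width, instead of the true 500. Re-collecting stats would repair the estimate (illustrative commands):

    ANALYZE TABLE alter5 PARTITION (dt='a') COMPUTE STATISTICS;
    ANALYZE TABLE alter5 PARTITION (dt='a') COMPUTE STATISTICS FOR COLUMNS;
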
+PREHOOK: query: drop table src_stat_part
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: drop table src_stat_part
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: create table src_stat_part(key string, value string) partitioned by (partitionId int)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@src_stat_part
+POSTHOOK: query: create table src_stat_part(key string, value string) partitioned by (partitionId int)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@src_stat_part
+PREHOOK: query: insert into table src_stat_part partition (partitionId=1)
+select * from src1 limit 5
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src1
+PREHOOK: Output: default@src_stat_part@partitionid=1
+POSTHOOK: query: insert into table src_stat_part partition (partitionId=1)
+select * from src1 limit 5
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src1
+POSTHOOK: Output: default@src_stat_part@partitionid=1
+POSTHOOK: Lineage: src_stat_part PARTITION(partitionid=1).key SIMPLE [(src1)src1.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: src_stat_part PARTITION(partitionid=1).value SIMPLE [(src1)src1.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: describe formatted src_stat_part PARTITION(partitionId=1)
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@src_stat_part
+POSTHOOK: query: describe formatted src_stat_part PARTITION(partitionId=1)
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@src_stat_part
+# col_name            	data_type           	comment             
+key                 	string              	                    
+value               	string              	                    
+	 	 
+# Partition Information	 	 
+# col_name            	data_type           	comment             
+partitionid         	int                 	                    
+	 	 
+# Detailed Partition Information	 	 
+Partition Value:    	[1]                 	 
+Database:           	default             	 
+Table:              	src_stat_part       	 
+#### A masked pattern was here ####
+Partition Parameters:	 	 
+	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}}
+	numFiles            	1                   
+	numRows             	5                   
+	rawDataSize         	38                  
+	totalSize           	43                  
+#### A masked pattern was here ####
+	 	 
+# Storage Information	 	 
+SerDe Library:      	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe	 
+InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
+OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
+Compressed:         	No                  	 
+Num Buckets:        	-1                  	 
+Bucket Columns:     	[]                  	 
+Sort Columns:       	[]                  	 
+Storage Desc Params:	 	 
+	serialization.format	1                   
+PREHOOK: query: insert into table src_stat_part partition (partitionId=2)
+select * from src1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src1
+PREHOOK: Output: default@src_stat_part@partitionid=2
+POSTHOOK: query: insert into table src_stat_part partition (partitionId=2)
+select * from src1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src1
+POSTHOOK: Output: default@src_stat_part@partitionid=2
+POSTHOOK: Lineage: src_stat_part PARTITION(partitionid=2).key SIMPLE [(src1)src1.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: src_stat_part PARTITION(partitionid=2).value SIMPLE [(src1)src1.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: describe formatted src_stat_part PARTITION(partitionId=2)
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@src_stat_part
+POSTHOOK: query: describe formatted src_stat_part PARTITION(partitionId=2)
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@src_stat_part
+# col_name            	data_type           	comment             
+key                 	string              	                    
+value               	string              	                    
+	 	 
+# Partition Information	 	 
+# col_name            	data_type           	comment             
+partitionid         	int                 	                    
+	 	 
+# Detailed Partition Information	 	 
+Partition Value:    	[2]                 	 
+Database:           	default             	 
+Table:              	src_stat_part       	 
+#### A masked pattern was here ####
+Partition Parameters:	 	 
+	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}}
+	numFiles            	1                   
+	numRows             	25                  
+	rawDataSize         	191                 
+	totalSize           	216                 
+#### A masked pattern was here ####
+	 	 
+# Storage Information	 	 
+SerDe Library:      	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe	 
+InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
+OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
+Compressed:         	No                  	 
+Num Buckets:        	-1                  	 
+Bucket Columns:     	[]                  	 
+Sort Columns:       	[]                  	 
+Storage Desc Params:	 	 
+	serialization.format	1                   
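
Statistics are tracked per partition, so partitionId=1 (loaded with LIMIT 5) and partitionId=2 (the full src1) report independent counts, and queries that touch only one partition plan against that partition's numbers:

    DESCRIBE FORMATTED src_stat_part PARTITION (partitionId=1);  -- numRows 5
    DESCRIBE FORMATTED src_stat_part PARTITION (partitionId=2);  -- numRows 25
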
+PREHOOK: query: drop table srcbucket_mapjoin
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: drop table srcbucket_mapjoin
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: CREATE TABLE srcbucket_mapjoin(key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@srcbucket_mapjoin
+POSTHOOK: query: CREATE TABLE srcbucket_mapjoin(key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@srcbucket_mapjoin
+PREHOOK: query: drop table tab_part
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: drop table tab_part
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: CREATE TABLE tab_part (key int, value string) PARTITIONED BY(ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@tab_part
+POSTHOOK: query: CREATE TABLE tab_part (key int, value string) PARTITIONED BY(ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@tab_part
+PREHOOK: query: drop table srcbucket_mapjoin_part
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: drop table srcbucket_mapjoin_part
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: CREATE TABLE srcbucket_mapjoin_part (key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@srcbucket_mapjoin_part
+POSTHOOK: query: CREATE TABLE srcbucket_mapjoin_part (key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@srcbucket_mapjoin_part
+PREHOOK: query: load data local inpath '../../data/files/srcbucket20.txt' INTO TABLE srcbucket_mapjoin partition(ds='2008-04-08')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@srcbucket_mapjoin
+POSTHOOK: query: load data local inpath '../../data/files/srcbucket20.txt' INTO TABLE srcbucket_mapjoin partition(ds='2008-04-08')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@srcbucket_mapjoin
+POSTHOOK: Output: default@srcbucket_mapjoin@ds=2008-04-08
+PREHOOK: query: load data local inpath '../../data/files/srcbucket22.txt' INTO TABLE srcbucket_mapjoin partition(ds='2008-04-08')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@srcbucket_mapjoin@ds=2008-04-08
+POSTHOOK: query: load data local inpath '../../data/files/srcbucket22.txt' INTO TABLE srcbucket_mapjoin partition(ds='2008-04-08')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@srcbucket_mapjoin@ds=2008-04-08
+PREHOOK: query: load data local inpath '../../data/files/srcbucket20.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@srcbucket_mapjoin_part
+POSTHOOK: query: load data local inpath '../../data/files/srcbucket20.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@srcbucket_mapjoin_part
+POSTHOOK: Output: default@srcbucket_mapjoin_part@ds=2008-04-08
+PREHOOK: query: load data local inpath '../../data/files/srcbucket21.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@srcbucket_mapjoin_part@ds=2008-04-08
+POSTHOOK: query: load data local inpath '../../data/files/srcbucket21.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@srcbucket_mapjoin_part@ds=2008-04-08
+PREHOOK: query: load data local inpath '../../data/files/srcbucket22.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@srcbucket_mapjoin_part@ds=2008-04-08
+POSTHOOK: query: load data local inpath '../../data/files/srcbucket22.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@srcbucket_mapjoin_part@ds=2008-04-08
+PREHOOK: query: load data local inpath '../../data/files/srcbucket23.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@srcbucket_mapjoin_part@ds=2008-04-08
+POSTHOOK: query: load data local inpath '../../data/files/srcbucket23.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@srcbucket_mapjoin_part@ds=2008-04-08
+PREHOOK: query: insert into table tab_part partition (ds='2008-04-08')
+select key,value from srcbucket_mapjoin_part
+PREHOOK: type: QUERY
+PREHOOK: Input: default@srcbucket_mapjoin_part
+PREHOOK: Input: default@srcbucket_mapjoin_part@ds=2008-04-08
+PREHOOK: Output: default@tab_part@ds=2008-04-08
+POSTHOOK: query: insert into table tab_part partition (ds='2008-04-08')
+select key,value from srcbucket_mapjoin_part
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@srcbucket_mapjoin_part
+POSTHOOK: Input: default@srcbucket_mapjoin_part@ds=2008-04-08
+POSTHOOK: Output: default@tab_part@ds=2008-04-08
+POSTHOOK: Lineage: tab_part PARTITION(ds=2008-04-08).key SIMPLE [(srcbucket_mapjoin_part)srcbucket_mapjoin_part.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: tab_part PARTITION(ds=2008-04-08).value SIMPLE [(srcbucket_mapjoin_part)srcbucket_mapjoin_part.FieldSchema(name:value, type:string, comment:null), ]
+PREHOOK: query: describe formatted tab_part partition (ds='2008-04-08')
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@tab_part
+POSTHOOK: query: describe formatted tab_part partition (ds='2008-04-08')
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@tab_part
+# col_name            	data_type           	comment             
+key                 	int                 	                    
+value               	string              	                    
+	 	 
+# Partition Information	 	 
+# col_name            	data_type           	comment             
+ds                  	string              	                    
+	 	 
+# Detailed Partition Information	 	 
+Partition Value:    	[2008-04-08]        	 
+Database:           	default             	 
+Table:              	tab_part            	 
+#### A masked pattern was here ####
+Partition Parameters:	 	 
+	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}}
+	numFiles            	4                   
+	numRows             	500                 
+	rawDataSize         	5312                
+	totalSize           	5812                
+#### A masked pattern was here ####
+	 	 
+# Storage Information	 	 
+SerDe Library:      	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe	 
+InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
+OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
+Compressed:         	No                  	 
+Num Buckets:        	4                   	 
+Bucket Columns:     	[key]               	 
+Sort Columns:       	[Order(col:key, order:1)]	 
+Storage Desc Params:	 	 
+	serialization.format	1                   
+PREHOOK: query: CREATE TABLE tab(key int, value string) PARTITIONED BY(ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@tab
+POSTHOOK: query: CREATE TABLE tab(key int, value string) PARTITIONED BY(ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@tab
+PREHOOK: query: insert into table tab partition (ds='2008-04-08')
+select key,value from srcbucket_mapjoin
+PREHOOK: type: QUERY
+PREHOOK: Input: default@srcbucket_mapjoin
+PREHOOK: Input: default@srcbucket_mapjoin@ds=2008-04-08
+PREHOOK: Output: default@tab@ds=2008-04-08
+POSTHOOK: query: insert into table tab partition (ds='2008-04-08')
+select key,value from srcbucket_mapjoin
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@srcbucket_mapjoin
+POSTHOOK: Input: default@srcbucket_mapjoin@ds=2008-04-08
+POSTHOOK: Output: default@tab@ds=2008-04-08
+POSTHOOK: Lineage: tab PARTITION(ds=2008-04-08).key SIMPLE [(srcbucket_mapjoin)srcbucket_mapjoin.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: tab PARTITION(ds=2008-04-08).value SIMPLE [(srcbucket_mapjoin)srcbucket_mapjoin.FieldSchema(name:value, type:string, comment:null), ]
+PREHOOK: query: describe formatted tab partition (ds='2008-04-08')
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@tab
+POSTHOOK: query: describe formatted tab partition (ds='2008-04-08')
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@tab
+# col_name            	data_type           	comment             
+key                 	int                 	                    
+value               	string              	                    
+	 	 
+# Partition Information	 	 
+# col_name            	data_type           	comment             
+ds                  	string              	                    
+	 	 
+# Detailed Partition Information	 	 
+Partition Value:    	[2008-04-08]        	 
+Database:           	default             	 
+Table:              	tab                 	 
+#### A masked pattern was here ####
+Partition Parameters:	 	 
+	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}}
+	numFiles            	2                   
+	numRows             	242                 
+	rawDataSize         	2566                
+	totalSize           	2808                
+#### A masked pattern was here ####
+	 	 
+# Storage Information	 	 
+SerDe Library:      	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe	 
+InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
+OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
+Compressed:         	No                  	 
+Num Buckets:        	2                   	 
+Bucket Columns:     	[key]               	 
+Sort Columns:       	[Order(col:key, order:1)]	 
+Storage Desc Params:	 	 
+	serialization.format	1                   
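
For the bucketed tables the Storage Information section now reflects the clustering DDL (Num Buckets, Bucket Columns [key], Sort Columns), and numFiles equals the bucket count since each bucket is written to its own file; the differing row counts (242 for tab, 500 for tab_part) simply mirror the source data. The layout comes from the table definition, e.g.:

    CREATE TABLE tab (key INT, value STRING)
      PARTITIONED BY (ds STRING)
      CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS
      STORED AS TEXTFILE;
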
+PREHOOK: query: drop table nzhang_part14
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@nzhang_part14
+PREHOOK: Output: default@nzhang_part14
+POSTHOOK: query: drop table nzhang_part14
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@nzhang_part14
+POSTHOOK: Output: default@nzhang_part14
+PREHOOK: query: create table if not exists nzhang_part14 (key string, value string)
+  partitioned by (ds string, hr string)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@nzhang_part14
+POSTHOOK: query: create table if not exists nzhang_part14 (key string, value string)
+  partitioned by (ds string, hr string)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@nzhang_part14
+PREHOOK: query: describe formatted nzhang_part14
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@nzhang_part14
+POSTHOOK: query: describe formatted nzhang_part14
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@nzhang_part14
+# col_name            	data_type           	comment             
+key                 	string              	                    
+value               	string              	                    
+	 	 
+# Partition Information	 	 
+# col_name            	data_type           	comment             
+ds                  	string              	                    
+hr                  	string              	                    
+	 	 
+# Detailed Table Information	 	 
+Database:           	default             	 
+#### A masked pattern was here ####
+Retention:          	0                   	 
+#### A masked pattern was here ####
+Table Type:         	MANAGED_TABLE       	 
+Table Parameters:	 	 
+	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\"}
+	numFiles            	0                   
+	numPartitions       	0                   
+	numRows             	0                   
+	rawDataSize         	0                   
+	totalSize           	0                   
+#### A masked pattern was here ####
+	 	 
+# Storage Information	 	 
+SerDe Library:      	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe	 
+InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
+OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
+Compressed:         	No                  	 
+Num Buckets:        	-1                  	 
+Bucket Columns:     	[]                  	 
+Sort Columns:       	[]                  	 
+Storage Desc Params:	 	 
+	serialization.format	1                   
+PREHOOK: query: insert into table nzhang_part14 partition(ds, hr) 
+select key, value, ds, hr from (
+  select * from (select 'k1' as key, cast(null as string) as value, '1' as ds, '2' as hr from src limit 2)a 
+  union all
+  select * from (select 'k2' as key, '' as value, '1' as ds, '3' as hr from src limit 2)b
+  union all 
+  select * from (select 'k3' as key, ' ' as value, '2' as ds, '1' as hr from src limit 2)c
+) T
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@nzhang_part14
+POSTHOOK: query: insert into table nzhang_part14 partition(ds, hr) 
+select key, value, ds, hr from (
+  select * from (select 'k1' as key, cast(null as string) as value, '1' as ds, '2' as hr from src limit 2)a 
+  union all
+  select * from (select 'k2' as key, '' as value, '1' as ds, '3' as hr from src limit 2)b
+  union all 
+  select * from (select 'k3' as key, ' ' as value, '2' as ds, '1' as hr from src limit 2)c
+) T
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@nzhang_part14@ds=1/hr=2
+POSTHOOK: Output: default@nzhang_part14@ds=1/hr=3
+POSTHOOK: Output: default@nzhang_part14@ds=2/hr=1
+POSTHOOK: Lineage: nzhang_part14 PARTITION(ds=1,hr=2).key EXPRESSION []
+POSTHOOK: Lineage: nzhang_part14 PARTITION(ds=1,hr=2).value EXPRESSION []
+POSTHOOK: Lineage: nzhang_part14 PARTITION(ds=1,hr=3).key EXPRESSION []
+POSTHOOK: Lineage: nzhang_part14 PARTITION(ds=1,hr=3).value EXPRESSION []
+POSTHOOK: Lineage: nzhang_part14 PARTITION(ds=2,hr=1).key EXPRESSION []
+POSTHOOK: Lineage: nzhang_part14 PARTITION(ds=2,hr=1).value EXPRESSION []
+PREHOOK: query: desc formatted nzhang_part14 partition(ds='1', hr='3')
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@nzhang_part14
+POSTHOOK: query: desc formatted nzhang_part14 partition(ds='1', hr='3')
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@nzhang_part14
+# col_name            	data_type           	comment             
+key                 	string              	                    
+value               	string              	                    
+	 	 
+# Partition Information	 	 
+# col_name            	data_type           	comment             
+ds                  	string              	                    
+hr                  	string              	                    
+	 	 
+# Detailed Partition Information	 	 
+Partition Value:    	[1, 3]              	 
+Database:           	default             	 
+Table:              	nzhang_part14       	 
+#### A masked pattern was here ####
+Partition Parameters:	 	 
+	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}}
+	numFiles            	1                   
+	numRows             	2                   
+	rawDataSize         	6                   
+	totalSize           	8                   
+#### A masked pattern was here ####
+	 	 
+# Storage Information	 	 
+SerDe Library:      	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe	 
+InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
+OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
+Compressed:         	No                  	 
+Num Buckets:        	-1                  	 
+Bucket Columns:     	[]                  	 
+Sort Columns:       	[]                  	 
+Storage Desc Params:	 	 
+	serialization.format	1                   
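
With two dynamic partition columns, each (ds, hr) combination becomes its own partition with its own accurate statistics; the (ds='1', hr='3') partition above holds just the two 'k2' rows, with correspondingly small raw and total sizes. Per-partition stats can be read back directly:

    DESC FORMATTED nzhang_part14 PARTITION (ds='1', hr='3');
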
+PREHOOK: query: INSERT into TABLE nzhang_part14 PARTITION (ds='2010-03-03', hr)
+SELECT key, value, hr FROM srcpart WHERE ds is not null and hr>10
+PREHOOK: type: QUERY
+PREHOOK: Input: default@srcpart
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
+PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
+PREHOOK: Output: default@nzhang_part14@ds=2010-03-03
+POSTHOOK: query: INSERT into TABLE nzhang_part14 PARTITION (ds='2010-03-03', hr)
+SELECT key, value, hr FROM srcpart WHERE ds is not null and hr>10
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@srcpart
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
+POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
+POSTHOOK: Output: default@nzhang_part14@ds=2010-03-03/hr=11
+POSTHOOK: Output: default@nzhang_part14@ds=2010-03-03/hr=12
+POSTHOOK: Lineage: nzhang_part14 PARTITION(ds=2010-03-03,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: nzhang_part14 PARTITION(ds=2010-03-03,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: nzhang_part14 PARTITION(ds=2010-03-03,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: nzhang_part14 PARTITION(ds=2010-03-03,hr=12).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: desc formatted nzhang_part14 PARTITION(ds='2010-03-03', hr='12')
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@nzhang_part14
+POSTHOOK: query: desc formatted nzhang_part14 PARTITION(ds='2010-03-03', hr='12')
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@nzhang_part14
+# col_name            	data_type           	comment             
+key                 	string              	                    
+value               	string              	                    
+	 	 
+# Partition Information	 	 
+# col_name            	data_type           	comment             
+ds                  	string              	                    
+hr                  	string              	                    
+	 	 
+# Detailed Partition Information	 	 
+Partition Value:    	[2010-03-03, 12]    	 
+Database:           	default             	 
+Table:              	nzhang_part14       	 
+#### A masked pattern was here ####
+Partition Parameters:	 	 
+	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}}
+	numFiles            	1                   
+	numRows             	1000                
+	rawDataSize         	10624               
+	totalSize           	11624               
+#### A masked pattern was here ####
+	 	 
+# Storage Information	 	 
+SerDe Library:      	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe	 
+InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
+OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
+Compressed:         	No                  	 
+Num Buckets:        	-1                  	 
+Bucket Columns:     	[]                  	 
+Sort Columns:       	[]                  	 
+Storage Desc Params:	 	 
+	serialization.format	1                   
+PREHOOK: query: drop table nzhang_part14
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@nzhang_part14
+PREHOOK: Output: default@nzhang_part14
+POSTHOOK: query: drop table nzhang_part14
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@nzhang_part14
+POSTHOOK: Output: default@nzhang_part14
+PREHOOK: query: create table if not exists nzhang_part14 (key string, value string)
+partitioned by (ds string, hr string)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@nzhang_part14
+POSTHOOK: query: create table if not exists nzhang_part14 (key string, value string)
+partitioned by (ds string, hr string)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@nzhang_part14
+PREHOOK: query: INSERT into TABLE nzhang_part14 PARTITION (ds='2010-03-03', hr)
+SELECT key, value, hr FROM srcpart WHERE ds is not null and hr>10
+PREHOOK: type: QUERY
+PREHOOK: Input: default@srcpart
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
+PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
+PREHOOK: Output: default@nzhang_part14@ds=2010-03-03
+POSTHOOK: query: INSERT into TABLE nzhang_part14 PARTITION (ds='2010-03-03', hr)
+SELECT key, value, hr FROM srcpart WHERE ds is not null and hr>10
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@srcpart
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
+POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
+POSTHOOK: Output: default@nzhang_part14@ds=2010-03-03/hr=11
+POSTHOOK: Output: default@nzhang_part14@ds=2010-03-03/hr=12
+POSTHOOK: Lineage: nzhang_part14 PARTITION(ds=2010-03-03,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: nzhang_part14 PARTITION(ds=2010-03-03,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: nzhang_part14 PARTITION(ds=2010-03-03,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: nzhang_part14 PARTITION(ds=2010-03-03,hr=12).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: desc formatted nzhang_part14 PARTITION(ds='2010-03-03', hr='12')
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@nzhang_part14
+POSTHOOK: query: desc formatted nzhang_part14 PARTITION(ds='2010-03-03', hr='12')
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@nzhang_part14
+# col_name            	data_type           	comment             
+key                 	string              	                    
+value               	string              	                    
+	 	 
+# Partition Information	 	 
+# col_name            	data_type           	comment             
+ds                  	string              	                    
+hr                  	string              	                    
+	 	 
+# Detailed Partition Information	 	 
+Partition Value:    	[2010-03-03, 12]    	 
+Database:           	default             	 
+Table:              	nzhang_part14       	 
+#### A masked pattern was here ####
+Partition Parameters:	 	 
+	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}}
+	numFiles            	1                   
+	numRows             	1000                
+	rawDataSize         	10624               
+	totalSize           	11624               
+#### A masked pattern was here ####
+	 	 
+# Storage Information	 	 
+SerDe Library:      	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe	 
+InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
+OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
+Compressed:         	No                  	 
+Num Buckets:        	-1                  	 
+Bucket Columns:     	[]                  	 
+Sort Columns:       	[]                  	 
+Storage Desc Params:	 	 
+	serialization.format	1                   
+PREHOOK: query: drop table a
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@a
+PREHOOK: Output: default@a
+POSTHOOK: query: drop table a
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@a
+POSTHOOK: Output: default@a
+PREHOOK: query: create table a (key string, value string)
+partitioned by (ds string, hr string)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@a
+POSTHOOK: query: create table a (key string, value string)
+partitioned by (ds string, hr string)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@a
+PREHOOK: query: drop table b
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@b
+PREHOOK: Output: default@b
+POSTHOOK: query: drop table b
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@b
+POSTHOOK: Output: default@b
+PREHOOK: query: create table b (key string, value string)
+partitioned by (ds string, hr string)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@b
+POSTHOOK: query: create table b (key string, value string)
+partitioned by (ds string, hr string)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@b
+PREHOOK: query: drop table c
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: drop table c
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: create table c (key string, value string)
+partitioned by (ds string, hr string)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@c
+POSTHOOK: query: create table c (key string, value string)
+partitioned by (ds string, hr string)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@c
+PREHOOK: query: FROM srcpart 
+INSERT into TABLE a PARTITION (ds='2010-03-11', hr) SELECT key, value, hr WHERE ds is not null and hr>10
+INSERT into TABLE b PARTITION (ds='2010-04-11', hr) SELECT key, value, hr WHERE ds is not null and hr>11
+INSERT into TABLE c PARTITION (ds='2010-05-11', hr) SELECT key, value, hr WHERE hr>0
+PREHOOK: type: QUERY
+PREHOOK: Input: default@srcpart
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
+PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
+PREHOOK: Output: default@a@ds=2010-03-11
+PREHOOK: Output: default@b@ds=2010-04-11
+PREHOOK: Output: default@c@ds=2010-05-11
+POSTHOOK: query: FROM srcpart 
+INSERT into TABLE a PARTITION (ds='2010-03-11', hr) SELECT key, value, hr WHERE ds is not null and hr>10
+INSERT into TABLE b PARTITION (ds='2010-04-11', hr) SELECT key, value, hr WHERE ds is not null and hr>11
+INSERT into TABLE c PARTITION (ds='2010-05-11', hr) SELECT key, value, hr WHERE hr>0
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@srcpart
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
+POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
+POSTHOOK: Output: default@a@ds=2010-03-11/hr=11
+POSTHOOK: Output: default@a@ds=2010-03-11/hr=12
+POSTHOOK: Output: default@b@ds=2010-04-11/hr=12
+POSTHOOK: Output: default@c@ds=2010-05-11/hr=11
+POSTHOOK: Output: default@c@ds=2010-05-11/hr=12
+POSTHOOK: Lineage: a PARTITION(ds=2010-03-11,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: a PARTITION(ds=2010-03-11,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: a PARTITION(ds=2010-03-11,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: a PARTITION(ds=2010-03-11,hr=12).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: b PARTITION(ds=2010-04-11,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: b PARTITION(ds=2010-04-11,hr=12).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: c PARTITION(ds=2010-05-11,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: c PARTITION(ds=2010-05-11,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: c PARTITION(ds=2010-05-11,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: c PARTITION(ds=2010-05-11,hr=12).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: explain select key from a
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select key from a
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        TableScan
+          alias: a
+          Statistics: Num rows: 2000 Data size: 174000 Basic stats: COMPLETE Column stats: COMPLETE
+          Select Operator
+            expressions: key (type: string)
+            outputColumnNames: _col0
+            Statistics: Num rows: 2000 Data size: 174000 Basic stats: COMPLETE Column stats: COMPLETE
+            ListSink
+
+PREHOOK: query: explain select value from b
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select value from b
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        TableScan
+          alias: b
+          Statistics: Num rows: 1000 Data size: 91000 Basic stats: COMPLETE Column stats: COMPLETE
+          Select Operator
+            expressions: value (type: string)
+            outputColumnNames: _col0
+            Statistics: Num rows: 1000 Data size: 91000 Basic stats: COMPLETE Column stats: COMPLETE
+            ListSink
+
+PREHOOK: query: explain select key from b
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select key from b
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        TableScan
+          alias: b
+          Statistics: Num rows: 1000 Data size: 87000 Basic stats: COMPLETE Column stats: COMPLETE
+          Select Operator
+            expressions: key (type: string)
+            outputColumnNames: _col0
+            Statistics: Num rows: 1000 Data size: 87000 Basic stats: COMPLETE Column stats: COMPLETE
+            ListSink
+
+PREHOOK: query: explain select value from c
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select value from c
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        TableScan
+          alias: c
+          Statistics: Num rows: 2000 Data size: 182000 Basic stats: COMPLETE Column stats: COMPLETE
+          Select Operator
+            expressions: value (type: string)
+            outputColumnNames: _col0
+            Statistics: Num rows: 2000 Data size: 182000 Basic stats: COMPLETE Column stats: COMPLETE
+            ListSink
+
+PREHOOK: query: explain select key from c
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select key from c
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        TableScan
+          alias: c
+          Statistics: Num rows: 2000 Data size: 174000 Basic stats: COMPLETE Column stats: COMPLETE
+          Select Operator
+            expressions: key (type: string)
+            outputColumnNames: _col0
+            Statistics: Num rows: 2000 Data size: 174000 Basic stats: COMPLETE Column stats: COMPLETE
+            ListSink
+

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/autoColumnStats_3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/autoColumnStats_3.q.out b/ql/src/test/results/clientpositive/autoColumnStats_3.q.out
index c6527a9..b3d8d3e 100644
--- a/ql/src/test/results/clientpositive/autoColumnStats_3.q.out
+++ b/ql/src/test/results/clientpositive/autoColumnStats_3.q.out
@@ -13,10 +13,12 @@ POSTHOOK: Output: default@src_multi1
 PREHOOK: query: analyze table src_multi1 compute statistics for columns key
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src_multi1
+PREHOOK: Output: default@src_multi1
 #### A masked pattern was here ####
 POSTHOOK: query: analyze table src_multi1 compute statistics for columns key
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src_multi1
+POSTHOOK: Output: default@src_multi1
 #### A masked pattern was here ####
 PREHOOK: query: describe formatted src_multi1
 PREHOOK: type: DESCTABLE
@@ -222,11 +224,15 @@ PREHOOK: query: analyze table nzhang_part14 partition(ds='1', hr='3') compute st
 PREHOOK: type: QUERY
 PREHOOK: Input: default@nzhang_part14
 PREHOOK: Input: default@nzhang_part14@ds=1/hr=3
+PREHOOK: Output: default@nzhang_part14
+PREHOOK: Output: default@nzhang_part14@ds=1/hr=3
 #### A masked pattern was here ####
 POSTHOOK: query: analyze table nzhang_part14 partition(ds='1', hr='3') compute statistics for columns value
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@nzhang_part14
 POSTHOOK: Input: default@nzhang_part14@ds=1/hr=3
+POSTHOOK: Output: default@nzhang_part14
+POSTHOOK: Output: default@nzhang_part14@ds=1/hr=3
 #### A masked pattern was here ####
 PREHOOK: query: desc formatted nzhang_part14 partition(ds='1', hr='3')
 PREHOOK: type: DESCTABLE
@@ -355,7 +361,7 @@ Database:           	default
 Table:              	nzhang_part14       	 
 #### A masked pattern was here ####
 Partition Parameters:	 	 
-	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}}
+	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"value\":\"true\"}}
 	numFiles            	2                   
 	numRows             	4                   
 	rawDataSize         	12                  
@@ -393,7 +399,7 @@ Database:           	default
 Table:              	nzhang_part14       	 
 #### A masked pattern was here ####
 Partition Parameters:	 	 
-	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}}
+	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\"}
 	numFiles            	2                   
 	numRows             	4                   
 	rawDataSize         	16                  
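
Two behavior changes show up in this file. First, ANALYZE ... COMPUTE STATISTICS FOR COLUMNS now registers the analyzed table and partition as outputs (the new PREHOOK/POSTHOOK Output lines), since writing stats back takes a write lock. Second, COLUMN_STATS_ACCURATE is now tracked per column, so analyzing only "value" no longer marks "key" as accurate. A sketch using the test's own names:

  -- locks default@nzhang_part14 and the ds=1/hr=3 partition for write
  ANALYZE TABLE nzhang_part14 PARTITION (ds='1', hr='3')
  COMPUTE STATISTICS FOR COLUMNS value;

  -- expected after the change: only the analyzed column is flagged, e.g.
  --   COLUMN_STATS_ACCURATE  {"BASIC_STATS":"true","COLUMN_STATS":{"value":"true"}}
  DESC FORMATTED nzhang_part14 PARTITION (ds='1', hr='3');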

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/autoColumnStats_4.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/autoColumnStats_4.q.out b/ql/src/test/results/clientpositive/autoColumnStats_4.q.out
index 9155733..2bc1789 100644
--- a/ql/src/test/results/clientpositive/autoColumnStats_4.q.out
+++ b/ql/src/test/results/clientpositive/autoColumnStats_4.q.out
@@ -49,8 +49,7 @@ STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-2 depends on stages: Stage-1
   Stage-0 depends on stages: Stage-2
-  Stage-3 depends on stages: Stage-0
-  Stage-5 depends on stages: Stage-3, Stage-4
+  Stage-3 depends on stages: Stage-0, Stage-4
   Stage-4 depends on stages: Stage-2
 
 STAGE PLANS:
@@ -138,10 +137,8 @@ STAGE PLANS:
           Write Type: INSERT
 
   Stage: Stage-3
-    Stats-Aggr Operator
-
-  Stage: Stage-5
-    Column Stats Work
+    Stats Work
+      Basic Stats Work:
       Column Stats Desc:
           Columns: a, b
           Column Types: int, varchar(128)
@@ -196,7 +193,6 @@ Retention:          	0
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
 Table Parameters:	 	 
-	COLUMN_STATS_ACCURATE	{\"COLUMN_STATS\":{\"a\":\"true\",\"b\":\"true\"}}
 	numFiles            	2                   
 	numRows             	0                   
 	rawDataSize         	0                   
@@ -239,6 +235,7 @@ Retention:          	0
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
 Table Parameters:	 	 
+	COLUMN_STATS_ACCURATE	{}                  
 	numFiles            	4                   
 	numRows             	0                   
 	rawDataSize         	0                   
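
This hunk shows the core of HIVE-16827 from the plan side: the formerly separate Stage-3 (Stats-Aggr Operator) and Stage-5 (Column Stats Work) collapse into a single Stats Work stage that carries both a Basic Stats Work section and the Column Stats Desc, which also shortens the stage dependency list. A sketch to observe it (table t is hypothetical; the test's real target is a transactional table):

  CREATE TABLE t (a INT, b VARCHAR(128));

  -- after the merge the tail of the plan should read roughly:
  --   Stage: Stage-N
  --     Stats Work
  --       Basic Stats Work:
  --       Column Stats Desc:
  --           Columns: a, b
  --           Column Types: int, varchar(128)
  EXPLAIN INSERT INTO TABLE t
  SELECT CAST(key AS INT), CAST(value AS VARCHAR(128)) FROM src;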

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/autoColumnStats_5.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/autoColumnStats_5.q.out b/ql/src/test/results/clientpositive/autoColumnStats_5.q.out
index cc4d32b..196d18d 100644
--- a/ql/src/test/results/clientpositive/autoColumnStats_5.q.out
+++ b/ql/src/test/results/clientpositive/autoColumnStats_5.q.out
@@ -17,7 +17,6 @@ STAGE DEPENDENCIES:
   Stage-4
   Stage-0 depends on stages: Stage-4, Stage-3, Stage-6
   Stage-2 depends on stages: Stage-0
-  Stage-8 depends on stages: Stage-2
   Stage-3
   Stage-5
   Stage-6 depends on stages: Stage-5
@@ -98,10 +97,8 @@ STAGE PLANS:
               name: default.partitioned1
 
   Stage: Stage-2
-    Stats-Aggr Operator
-
-  Stage: Stage-8
-    Column Stats Work
+    Stats Work
+      Basic Stats Work:
       Column Stats Desc:
           Columns: a, b
           Column Types: int, string
@@ -260,7 +257,6 @@ STAGE DEPENDENCIES:
   Stage-4
   Stage-0 depends on stages: Stage-4, Stage-3, Stage-6
   Stage-2 depends on stages: Stage-0
-  Stage-8 depends on stages: Stage-2
   Stage-3
   Stage-5
   Stage-6 depends on stages: Stage-5
@@ -341,10 +337,8 @@ STAGE PLANS:
               name: default.partitioned1
 
   Stage: Stage-2
-    Stats-Aggr Operator
-
-  Stage: Stage-8
-    Column Stats Work
+    Stats Work
+      Basic Stats Work:
       Column Stats Desc:
           Columns: a, b, c, d
           Column Types: int, string, int, string
@@ -461,7 +455,6 @@ STAGE DEPENDENCIES:
   Stage-4
   Stage-0 depends on stages: Stage-4, Stage-3, Stage-6
   Stage-2 depends on stages: Stage-0
-  Stage-8 depends on stages: Stage-2
   Stage-3
   Stage-5
   Stage-6 depends on stages: Stage-5
@@ -542,10 +535,8 @@ STAGE PLANS:
               name: default.partitioned1
 
   Stage: Stage-2
-    Stats-Aggr Operator
-
-  Stage: Stage-8
-    Column Stats Work
+    Stats Work
+      Basic Stats Work:
       Column Stats Desc:
           Columns: a, b, c, d
           Column Types: int, string, int, string
@@ -613,7 +604,7 @@ Database:           	default
 Table:              	partitioned1        	 
 #### A masked pattern was here ####
 Partition Parameters:	 	 
-	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"a\":\"true\",\"b\":\"true\",\"c\":\"true\",\"d\":\"true\"}}
+	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"a\":\"true\",\"b\":\"true\"}}
 	numFiles            	2                   
 	numRows             	6                   
 	rawDataSize         	78                  
@@ -656,15 +647,5 @@ POSTHOOK: query: desc formatted partitioned1 partition(part=1) c
 POSTHOOK: type: DESCTABLE
 POSTHOOK: Input: default@partitioned1
 col_name	data_type	min	max	num_nulls	distinct_count	avg_col_len	max_col_len	num_trues	num_falses	bitvector	comment
-col_name            	c                   	 	 	 	 	 	 	 	 	 	 
-data_type           	int                 	 	 	 	 	 	 	 	 	 	 
-min                 	100                 	 	 	 	 	 	 	 	 	 	 
-max                 	200                 	 	 	 	 	 	 	 	 	 	 
-num_nulls           	0                   	 	 	 	 	 	 	 	 	 	 
-distinct_count      	2                   	 	 	 	 	 	 	 	 	 	 
-avg_col_len         	                    	 	 	 	 	 	 	 	 	 	 
-max_col_len         	                    	 	 	 	 	 	 	 	 	 	 
-num_trues           	                    	 	 	 	 	 	 	 	 	 	 
-num_falses          	                    	 	 	 	 	 	 	 	 	 	 
-bitVector           	HL                  	 	 	 	 	 	 	 	 	 	 
-comment             	from deserializer   	 	 	 	 	 	 	 	 	 	 
+# col_name            	data_type           	comment             	 	 	 	 	 	 	 	 	 
+c                   	int                 	from deserializer   	 	 	 	 	 	 	 	 	 


http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/groupby4_noskew.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/groupby4_noskew.q.out b/ql/src/test/results/clientpositive/spark/groupby4_noskew.q.out
index 04f58fa..4af92e8 100644
--- a/ql/src/test/results/clientpositive/spark/groupby4_noskew.q.out
+++ b/ql/src/test/results/clientpositive/spark/groupby4_noskew.q.out
@@ -67,7 +67,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM src
 INSERT OVERWRITE TABLE dest1 SELECT substr(src.key,1,1) GROUP BY substr(src.key,1,1)

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/groupby5.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/groupby5.q.out b/ql/src/test/results/clientpositive/spark/groupby5.q.out
index d292f74..8dfed8b 100644
--- a/ql/src/test/results/clientpositive/spark/groupby5.q.out
+++ b/ql/src/test/results/clientpositive/spark/groupby5.q.out
@@ -92,7 +92,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: INSERT OVERWRITE TABLE dest1 
 SELECT src.key, sum(substr(src.value,5)) 

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/groupby5_map.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/groupby5_map.q.out b/ql/src/test/results/clientpositive/spark/groupby5_map.q.out
index add3094..96cf6b7 100644
--- a/ql/src/test/results/clientpositive/spark/groupby5_map.q.out
+++ b/ql/src/test/results/clientpositive/spark/groupby5_map.q.out
@@ -73,7 +73,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM src INSERT OVERWRITE TABLE dest1 SELECT sum(src.key)
 PREHOOK: type: QUERY

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/groupby5_map_skew.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/groupby5_map_skew.q.out b/ql/src/test/results/clientpositive/spark/groupby5_map_skew.q.out
index 924ef5d..11a6558 100644
--- a/ql/src/test/results/clientpositive/spark/groupby5_map_skew.q.out
+++ b/ql/src/test/results/clientpositive/spark/groupby5_map_skew.q.out
@@ -73,7 +73,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM src INSERT OVERWRITE TABLE dest1 SELECT sum(src.key)
 PREHOOK: type: QUERY

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/groupby5_noskew.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/groupby5_noskew.q.out b/ql/src/test/results/clientpositive/spark/groupby5_noskew.q.out
index 300ccb6..16ac57b 100644
--- a/ql/src/test/results/clientpositive/spark/groupby5_noskew.q.out
+++ b/ql/src/test/results/clientpositive/spark/groupby5_noskew.q.out
@@ -77,7 +77,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: INSERT OVERWRITE TABLE dest1 
 SELECT src.key, sum(substr(src.value,5)) 

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/groupby6.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/groupby6.q.out b/ql/src/test/results/clientpositive/spark/groupby6.q.out
index 4f406d7..04e6127 100644
--- a/ql/src/test/results/clientpositive/spark/groupby6.q.out
+++ b/ql/src/test/results/clientpositive/spark/groupby6.q.out
@@ -80,7 +80,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM src
 INSERT OVERWRITE TABLE dest1 SELECT DISTINCT substr(src.value,5,1)

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/groupby6_map.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/groupby6_map.q.out b/ql/src/test/results/clientpositive/spark/groupby6_map.q.out
index 03f68c6..509014a 100644
--- a/ql/src/test/results/clientpositive/spark/groupby6_map.q.out
+++ b/ql/src/test/results/clientpositive/spark/groupby6_map.q.out
@@ -72,7 +72,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM src
 INSERT OVERWRITE TABLE dest1 SELECT DISTINCT substr(src.value,5,1)

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/groupby6_map_skew.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/groupby6_map_skew.q.out b/ql/src/test/results/clientpositive/spark/groupby6_map_skew.q.out
index 606b5d5..e2260ed 100644
--- a/ql/src/test/results/clientpositive/spark/groupby6_map_skew.q.out
+++ b/ql/src/test/results/clientpositive/spark/groupby6_map_skew.q.out
@@ -85,7 +85,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM src
 INSERT OVERWRITE TABLE dest1 SELECT DISTINCT substr(src.value,5,1)

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/groupby6_noskew.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/groupby6_noskew.q.out b/ql/src/test/results/clientpositive/spark/groupby6_noskew.q.out
index eb72f01..f955519 100644
--- a/ql/src/test/results/clientpositive/spark/groupby6_noskew.q.out
+++ b/ql/src/test/results/clientpositive/spark/groupby6_noskew.q.out
@@ -67,7 +67,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM src
 INSERT OVERWRITE TABLE dest1 SELECT DISTINCT substr(src.value,5,1)

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/groupby7_map.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/groupby7_map.q.out b/ql/src/test/results/clientpositive/spark/groupby7_map.q.out
index 31daab8..9260e7d 100644
--- a/ql/src/test/results/clientpositive/spark/groupby7_map.q.out
+++ b/ql/src/test/results/clientpositive/spark/groupby7_map.q.out
@@ -112,7 +112,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-1
     Move Operator
@@ -125,7 +126,8 @@ STAGE PLANS:
               name: default.dest2
 
   Stage: Stage-4
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM SRC
 INSERT OVERWRITE TABLE DEST1 SELECT SRC.key, sum(SUBSTR(SRC.value,5)) GROUP BY SRC.key
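
For multi-insert queries the same renaming applies once per destination: above, Stage-3 covers dest1 and Stage-4 covers dest2, and each is now printed as Stats Work with its own Basic Stats Work section. A sketch mirroring the test query, assuming dest1 and dest2 already exist with a (key, value) schema:

  EXPLAIN
  FROM src
  INSERT OVERWRITE TABLE dest1 SELECT key, sum(substr(value, 5)) GROUP BY key
  INSERT OVERWRITE TABLE dest2 SELECT key, count(substr(value, 5)) GROUP BY key;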

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/groupby7_map_multi_single_reducer.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/groupby7_map_multi_single_reducer.q.out b/ql/src/test/results/clientpositive/spark/groupby7_map_multi_single_reducer.q.out
index 625a737..c628b39 100644
--- a/ql/src/test/results/clientpositive/spark/groupby7_map_multi_single_reducer.q.out
+++ b/ql/src/test/results/clientpositive/spark/groupby7_map_multi_single_reducer.q.out
@@ -105,7 +105,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-1
     Move Operator
@@ -118,7 +119,8 @@ STAGE PLANS:
               name: default.dest2
 
   Stage: Stage-4
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM SRC
 INSERT OVERWRITE TABLE DEST1 SELECT SRC.key, sum(SUBSTR(SRC.value,5)) GROUP BY SRC.key

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/groupby7_map_skew.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/groupby7_map_skew.q.out b/ql/src/test/results/clientpositive/spark/groupby7_map_skew.q.out
index 4fbfd30..e55a79d 100644
--- a/ql/src/test/results/clientpositive/spark/groupby7_map_skew.q.out
+++ b/ql/src/test/results/clientpositive/spark/groupby7_map_skew.q.out
@@ -127,7 +127,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-1
     Move Operator
@@ -140,7 +141,8 @@ STAGE PLANS:
               name: default.dest2
 
   Stage: Stage-4
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM SRC
 INSERT OVERWRITE TABLE DEST1 SELECT SRC.key, sum(SUBSTR(SRC.value,5)) GROUP BY SRC.key

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/groupby7_noskew.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/groupby7_noskew.q.out b/ql/src/test/results/clientpositive/spark/groupby7_noskew.q.out
index a26247a..9e05b99 100644
--- a/ql/src/test/results/clientpositive/spark/groupby7_noskew.q.out
+++ b/ql/src/test/results/clientpositive/spark/groupby7_noskew.q.out
@@ -106,7 +106,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-1
     Move Operator
@@ -119,7 +120,8 @@ STAGE PLANS:
               name: default.dest2
 
   Stage: Stage-4
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM SRC
 INSERT OVERWRITE TABLE DEST1 SELECT SRC.key, sum(SUBSTR(SRC.value,5)) GROUP BY SRC.key

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/groupby7_noskew_multi_single_reducer.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/groupby7_noskew_multi_single_reducer.q.out b/ql/src/test/results/clientpositive/spark/groupby7_noskew_multi_single_reducer.q.out
index 2dce301..9a9f7b2 100644
--- a/ql/src/test/results/clientpositive/spark/groupby7_noskew_multi_single_reducer.q.out
+++ b/ql/src/test/results/clientpositive/spark/groupby7_noskew_multi_single_reducer.q.out
@@ -125,7 +125,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-1
     Move Operator
@@ -138,7 +139,8 @@ STAGE PLANS:
               name: default.dest2
 
   Stage: Stage-4
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM SRC
 INSERT OVERWRITE TABLE DEST1 SELECT SRC.key, sum(SUBSTR(SRC.value,5)) GROUP BY SRC.key ORDER BY SRC.key limit 10

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/groupby8.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/groupby8.q.out b/ql/src/test/results/clientpositive/spark/groupby8.q.out
index f7f3279..c66a53d 100644
--- a/ql/src/test/results/clientpositive/spark/groupby8.q.out
+++ b/ql/src/test/results/clientpositive/spark/groupby8.q.out
@@ -120,7 +120,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-1
     Move Operator
@@ -133,7 +134,8 @@ STAGE PLANS:
               name: default.dest2
 
   Stage: Stage-4
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM SRC
 INSERT OVERWRITE TABLE DEST1 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key
@@ -893,7 +895,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-1
     Move Operator
@@ -906,7 +909,8 @@ STAGE PLANS:
               name: default.dest2
 
   Stage: Stage-4
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM SRC
 INSERT OVERWRITE TABLE DEST1 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/groupby8_map.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/groupby8_map.q.out b/ql/src/test/results/clientpositive/spark/groupby8_map.q.out
index 288ca3f..25c07d0 100644
--- a/ql/src/test/results/clientpositive/spark/groupby8_map.q.out
+++ b/ql/src/test/results/clientpositive/spark/groupby8_map.q.out
@@ -104,7 +104,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-1
     Move Operator
@@ -117,7 +118,8 @@ STAGE PLANS:
               name: default.dest2
 
   Stage: Stage-4
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM SRC
 INSERT OVERWRITE TABLE DEST1 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/groupby8_map_skew.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/groupby8_map_skew.q.out b/ql/src/test/results/clientpositive/spark/groupby8_map_skew.q.out
index 9e76fd5..1da8890 100644
--- a/ql/src/test/results/clientpositive/spark/groupby8_map_skew.q.out
+++ b/ql/src/test/results/clientpositive/spark/groupby8_map_skew.q.out
@@ -126,7 +126,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-1
     Move Operator
@@ -139,7 +140,8 @@ STAGE PLANS:
               name: default.dest2
 
   Stage: Stage-4
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM SRC
 INSERT OVERWRITE TABLE DEST1 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/groupby8_noskew.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/groupby8_noskew.q.out b/ql/src/test/results/clientpositive/spark/groupby8_noskew.q.out
index 288ca3f..25c07d0 100644
--- a/ql/src/test/results/clientpositive/spark/groupby8_noskew.q.out
+++ b/ql/src/test/results/clientpositive/spark/groupby8_noskew.q.out
@@ -104,7 +104,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-1
     Move Operator
@@ -117,7 +118,8 @@ STAGE PLANS:
               name: default.dest2
 
   Stage: Stage-4
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM SRC
 INSERT OVERWRITE TABLE DEST1 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/groupby9.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/groupby9.q.out b/ql/src/test/results/clientpositive/spark/groupby9.q.out
index d59d8cf..c016e4b 100644
--- a/ql/src/test/results/clientpositive/spark/groupby9.q.out
+++ b/ql/src/test/results/clientpositive/spark/groupby9.q.out
@@ -131,7 +131,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-1
     Move Operator
@@ -144,7 +145,8 @@ STAGE PLANS:
               name: default.dest2
 
   Stage: Stage-4
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM SRC
 INSERT OVERWRITE TABLE DEST1 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key
@@ -916,7 +918,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-1
     Move Operator
@@ -929,7 +932,8 @@ STAGE PLANS:
               name: default.dest2
 
   Stage: Stage-4
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM SRC
 INSERT OVERWRITE TABLE DEST1 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key
@@ -1701,7 +1705,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-1
     Move Operator
@@ -1714,7 +1719,8 @@ STAGE PLANS:
               name: default.dest2
 
   Stage: Stage-4
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM SRC
 INSERT OVERWRITE TABLE DEST1 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key
@@ -2488,7 +2494,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-1
     Move Operator
@@ -2501,7 +2508,8 @@ STAGE PLANS:
               name: default.dest2
 
   Stage: Stage-4
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM SRC
 INSERT OVERWRITE TABLE DEST1 SELECT SRC.key, COUNT(SUBSTR(SRC.value,5)) GROUP BY SRC.key
@@ -3273,7 +3281,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-1
     Move Operator
@@ -3286,7 +3295,8 @@ STAGE PLANS:
               name: default.dest2
 
   Stage: Stage-4
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM SRC
 INSERT OVERWRITE TABLE DEST1 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/groupby_complex_types.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/groupby_complex_types.q.out b/ql/src/test/results/clientpositive/spark/groupby_complex_types.q.out
index 169ee04..3b5ec42 100644
--- a/ql/src/test/results/clientpositive/spark/groupby_complex_types.q.out
+++ b/ql/src/test/results/clientpositive/spark/groupby_complex_types.q.out
@@ -175,7 +175,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-4
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-1
     Move Operator
@@ -188,7 +189,8 @@ STAGE PLANS:
               name: default.dest2
 
   Stage: Stage-5
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-2
     Move Operator
@@ -201,7 +203,8 @@ STAGE PLANS:
               name: default.dest3
 
   Stage: Stage-6
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM SRC
 INSERT OVERWRITE TABLE DEST1 SELECT ARRAY(SRC.key), COUNT(1) GROUP BY ARRAY(SRC.key)

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/groupby_complex_types_multi_single_reducer.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/groupby_complex_types_multi_single_reducer.q.out b/ql/src/test/results/clientpositive/spark/groupby_complex_types_multi_single_reducer.q.out
index 7a730d2..71e74a9 100644
--- a/ql/src/test/results/clientpositive/spark/groupby_complex_types_multi_single_reducer.q.out
+++ b/ql/src/test/results/clientpositive/spark/groupby_complex_types_multi_single_reducer.q.out
@@ -159,7 +159,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-1
     Move Operator
@@ -172,7 +173,8 @@ STAGE PLANS:
               name: default.dest2
 
   Stage: Stage-4
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM SRC
 INSERT OVERWRITE TABLE DEST1 SELECT ARRAY(SRC.key) as keyarray, COUNT(1) GROUP BY ARRAY(SRC.key) ORDER BY keyarray limit 10

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/groupby_cube1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/groupby_cube1.q.out b/ql/src/test/results/clientpositive/spark/groupby_cube1.q.out
index 52c87ef..7629fe6 100644
--- a/ql/src/test/results/clientpositive/spark/groupby_cube1.q.out
+++ b/ql/src/test/results/clientpositive/spark/groupby_cube1.q.out
@@ -704,7 +704,8 @@ STAGE PLANS:
               name: default.t2
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-1
     Move Operator
@@ -717,7 +718,8 @@ STAGE PLANS:
               name: default.t3
 
   Stage: Stage-4
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM T1
 INSERT OVERWRITE TABLE T2 SELECT key, val, count(1) group by key, val with cube

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/groupby_map_ppr.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/groupby_map_ppr.q.out b/ql/src/test/results/clientpositive/spark/groupby_map_ppr.q.out
index f3a95bd..d1fcd48 100644
--- a/ql/src/test/results/clientpositive/spark/groupby_map_ppr.q.out
+++ b/ql/src/test/results/clientpositive/spark/groupby_map_ppr.q.out
@@ -234,7 +234,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
 PREHOOK: query: FROM srcpart src

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/groupby_map_ppr_multi_distinct.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/groupby_map_ppr_multi_distinct.q.out b/ql/src/test/results/clientpositive/spark/groupby_map_ppr_multi_distinct.q.out
index 666f355..30e5daa 100644
--- a/ql/src/test/results/clientpositive/spark/groupby_map_ppr_multi_distinct.q.out
+++ b/ql/src/test/results/clientpositive/spark/groupby_map_ppr_multi_distinct.q.out
@@ -234,7 +234,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
 PREHOOK: query: FROM srcpart src

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/groupby_multi_insert_common_distinct.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/groupby_multi_insert_common_distinct.q.out b/ql/src/test/results/clientpositive/spark/groupby_multi_insert_common_distinct.q.out
index bf4132a..f88ff7a 100644
--- a/ql/src/test/results/clientpositive/spark/groupby_multi_insert_common_distinct.q.out
+++ b/ql/src/test/results/clientpositive/spark/groupby_multi_insert_common_distinct.q.out
@@ -131,7 +131,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-1
     Move Operator
@@ -144,7 +145,8 @@ STAGE PLANS:
               name: default.dest2
 
   Stage: Stage-4
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: from src
 insert overwrite table dest1 select key, count(distinct value) group by key

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/groupby_multi_single_reducer.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/groupby_multi_single_reducer.q.out b/ql/src/test/results/clientpositive/spark/groupby_multi_single_reducer.q.out
index c16df1b..89db8ce 100644
--- a/ql/src/test/results/clientpositive/spark/groupby_multi_single_reducer.q.out
+++ b/ql/src/test/results/clientpositive/spark/groupby_multi_single_reducer.q.out
@@ -157,7 +157,8 @@ STAGE PLANS:
               name: default.dest_g4
 
   Stage: Stage-4
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-0
     Move Operator
@@ -170,7 +171,8 @@ STAGE PLANS:
               name: default.dest_g2
 
   Stage: Stage-5
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-1
     Move Operator
@@ -183,7 +185,8 @@ STAGE PLANS:
               name: default.dest_g3
 
   Stage: Stage-6
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM src
 INSERT OVERWRITE TABLE dest_g2 SELECT substr(src.key,1,1), count(DISTINCT substr(src.value,5)), concat(substr(src.key,1,1),sum(substr(src.value,5))), sum(DISTINCT substr(src.value, 5)), count(src.value) WHERE substr(src.key,1,1) >= 5 GROUP BY substr(src.key,1,1)
@@ -468,7 +471,8 @@ STAGE PLANS:
               name: default.dest_g4
 
   Stage: Stage-6
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-0
     Move Operator
@@ -481,7 +485,8 @@ STAGE PLANS:
               name: default.dest_g2
 
   Stage: Stage-7
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-1
     Move Operator
@@ -494,7 +499,8 @@ STAGE PLANS:
               name: default.dest_g3
 
   Stage: Stage-8
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-3
     Move Operator
@@ -507,7 +513,8 @@ STAGE PLANS:
               name: default.dest_h2
 
   Stage: Stage-9
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-4
     Move Operator
@@ -520,7 +527,8 @@ STAGE PLANS:
               name: default.dest_h3
 
   Stage: Stage-10
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM src
 INSERT OVERWRITE TABLE dest_g2 SELECT substr(src.key,1,1), count(DISTINCT substr(src.value,5)), concat(substr(src.key,1,1),sum(substr(src.value,5))), sum(DISTINCT substr(src.value, 5)), count(src.value) WHERE substr(src.key,1,1) >= 5 GROUP BY substr(src.key,1,1)

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/groupby_multi_single_reducer2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/groupby_multi_single_reducer2.q.out b/ql/src/test/results/clientpositive/spark/groupby_multi_single_reducer2.q.out
index 37deb93..a90082f 100644
--- a/ql/src/test/results/clientpositive/spark/groupby_multi_single_reducer2.q.out
+++ b/ql/src/test/results/clientpositive/spark/groupby_multi_single_reducer2.q.out
@@ -110,7 +110,8 @@ STAGE PLANS:
               name: default.dest_g2
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-1
     Move Operator
@@ -123,7 +124,8 @@ STAGE PLANS:
               name: default.dest_g3
 
   Stage: Stage-4
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM src
 INSERT OVERWRITE TABLE dest_g2 SELECT substr(src.key,1,1), count(DISTINCT src.key) WHERE substr(src.key,1,1) >= 5 GROUP BY substr(src.key,1,1)

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/groupby_multi_single_reducer3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/groupby_multi_single_reducer3.q.out b/ql/src/test/results/clientpositive/spark/groupby_multi_single_reducer3.q.out
index f9eaa3c..50ae717 100644
--- a/ql/src/test/results/clientpositive/spark/groupby_multi_single_reducer3.q.out
+++ b/ql/src/test/results/clientpositive/spark/groupby_multi_single_reducer3.q.out
@@ -122,7 +122,8 @@ STAGE PLANS:
               name: default.e1
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-1
     Move Operator
@@ -135,7 +136,8 @@ STAGE PLANS:
               name: default.e2
 
   Stage: Stage-4
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: from src
 insert overwrite table e1
@@ -294,7 +296,8 @@ STAGE PLANS:
               name: default.e1
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-1
     Move Operator
@@ -307,7 +310,8 @@ STAGE PLANS:
               name: default.e2
 
   Stage: Stage-4
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: from src
 insert overwrite table e1
@@ -466,7 +470,8 @@ STAGE PLANS:
               name: default.e1
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-1
     Move Operator
@@ -479,7 +484,8 @@ STAGE PLANS:
               name: default.e2
 
   Stage: Stage-4
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: from src
 insert overwrite table e1
@@ -638,7 +644,8 @@ STAGE PLANS:
               name: default.e1
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-1
     Move Operator
@@ -651,7 +658,8 @@ STAGE PLANS:
               name: default.e2
 
   Stage: Stage-4
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: from src
 insert overwrite table e1

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/groupby_position.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/groupby_position.q.out b/ql/src/test/results/clientpositive/spark/groupby_position.q.out
index 91c2581..a88176f 100644
--- a/ql/src/test/results/clientpositive/spark/groupby_position.q.out
+++ b/ql/src/test/results/clientpositive/spark/groupby_position.q.out
@@ -129,7 +129,8 @@ STAGE PLANS:
               name: default.testtable1
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-1
     Move Operator
@@ -142,7 +143,8 @@ STAGE PLANS:
               name: default.testtable2
 
   Stage: Stage-4
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM SRC
 INSERT OVERWRITE TABLE testTable1 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) WHERE SRC.key < 20 GROUP BY 1
@@ -320,7 +322,8 @@ STAGE PLANS:
               name: default.testtable1
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-1
     Move Operator
@@ -333,7 +336,8 @@ STAGE PLANS:
               name: default.testtable2
 
   Stage: Stage-4
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM SRC
 INSERT OVERWRITE TABLE testTable1 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) WHERE SRC.key < 20 GROUP BY 1

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/groupby_ppr.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/groupby_ppr.q.out b/ql/src/test/results/clientpositive/spark/groupby_ppr.q.out
index e45b5c2..2bbffaf 100644
--- a/ql/src/test/results/clientpositive/spark/groupby_ppr.q.out
+++ b/ql/src/test/results/clientpositive/spark/groupby_ppr.q.out
@@ -227,7 +227,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
 PREHOOK: query: FROM srcpart src

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/groupby_ppr_multi_distinct.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/groupby_ppr_multi_distinct.q.out b/ql/src/test/results/clientpositive/spark/groupby_ppr_multi_distinct.q.out
index c3c97f5..14c287f 100644
--- a/ql/src/test/results/clientpositive/spark/groupby_ppr_multi_distinct.q.out
+++ b/ql/src/test/results/clientpositive/spark/groupby_ppr_multi_distinct.q.out
@@ -227,7 +227,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
 PREHOOK: query: FROM srcpart src
@@ -494,7 +495,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
 PREHOOK: query: FROM srcpart src

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/groupby_rollup1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/groupby_rollup1.q.out b/ql/src/test/results/clientpositive/spark/groupby_rollup1.q.out
index 68670ab..29b111f 100644
--- a/ql/src/test/results/clientpositive/spark/groupby_rollup1.q.out
+++ b/ql/src/test/results/clientpositive/spark/groupby_rollup1.q.out
@@ -537,7 +537,8 @@ STAGE PLANS:
               name: default.t2
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-1
     Move Operator
@@ -550,7 +551,8 @@ STAGE PLANS:
               name: default.t3
 
   Stage: Stage-4
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM T1
 INSERT OVERWRITE TABLE T2 SELECT key, val, count(1) group by key, val with rollup

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/groupby_sort_1_23.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/groupby_sort_1_23.q.out b/ql/src/test/results/clientpositive/spark/groupby_sort_1_23.q.out
index 8292e3a..5ceb9e9 100644
--- a/ql/src/test/results/clientpositive/spark/groupby_sort_1_23.q.out
+++ b/ql/src/test/results/clientpositive/spark/groupby_sort_1_23.q.out
@@ -189,7 +189,8 @@ STAGE PLANS:
               name: default.outputtbl1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
 PREHOOK: query: INSERT OVERWRITE TABLE outputTbl1
@@ -400,7 +401,8 @@ STAGE PLANS:
               name: default.outputtbl2
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
 PREHOOK: query: INSERT OVERWRITE TABLE outputTbl2
@@ -585,7 +587,8 @@ STAGE PLANS:
               name: default.outputtbl1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
 PREHOOK: query: INSERT OVERWRITE TABLE outputTbl1
@@ -768,7 +771,8 @@ STAGE PLANS:
               name: default.outputtbl1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
 PREHOOK: query: INSERT OVERWRITE TABLE outputTbl1
@@ -959,7 +963,8 @@ STAGE PLANS:
               name: default.outputtbl3
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
 PREHOOK: query: INSERT OVERWRITE TABLE outputTbl3
@@ -1171,7 +1176,8 @@ STAGE PLANS:
               name: default.outputtbl4
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
 PREHOOK: query: INSERT OVERWRITE TABLE outputTbl4
@@ -1377,7 +1383,8 @@ STAGE PLANS:
               name: default.outputtbl3
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
 PREHOOK: query: INSERT OVERWRITE TABLE outputTbl3
@@ -1595,7 +1602,8 @@ STAGE PLANS:
               name: default.outputtbl1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
 PREHOOK: query: INSERT OVERWRITE TABLE outputTbl1
@@ -1897,7 +1905,8 @@ STAGE PLANS:
               name: default.outputtbl1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
 PREHOOK: query: INSERT OVERWRITE TABLE outputTbl1
@@ -2232,7 +2241,8 @@ STAGE PLANS:
               name: default.outputtbl1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
 PREHOOK: query: INSERT OVERWRITE TABLE outputTbl1
@@ -2538,7 +2548,8 @@ STAGE PLANS:
               name: default.outputtbl1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
 PREHOOK: query: INSERT OVERWRITE TABLE outputTbl1
@@ -3014,7 +3025,8 @@ STAGE PLANS:
               name: default.outputtbl1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
 PREHOOK: query: INSERT OVERWRITE TABLE outputTbl1
@@ -3197,7 +3209,8 @@ STAGE PLANS:
               name: default.outputtbl4
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
 PREHOOK: query: INSERT OVERWRITE TABLE outputTbl4
@@ -3391,7 +3404,8 @@ STAGE PLANS:
               name: default.outputtbl5
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
 PREHOOK: query: INSERT OVERWRITE TABLE outputTbl5
@@ -3584,7 +3598,8 @@ STAGE PLANS:
               name: default.outputtbl4
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
 PREHOOK: query: INSERT OVERWRITE TABLE outputTbl4
@@ -3784,7 +3799,8 @@ STAGE PLANS:
               name: default.outputtbl4
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
 PREHOOK: query: INSERT OVERWRITE TABLE outputTbl4
@@ -3941,7 +3957,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-1
     Move Operator
@@ -3954,7 +3971,8 @@ STAGE PLANS:
               name: default.dest2
 
   Stage: Stage-4
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM T2
 INSERT OVERWRITE TABLE DEST1 SELECT key, count(1) GROUP BY key
@@ -4105,7 +4123,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-1
     Move Operator
@@ -4118,7 +4137,8 @@ STAGE PLANS:
               name: default.dest2
 
   Stage: Stage-4
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM (select key, val from T2 where key = 8) x
 INSERT OVERWRITE TABLE DEST1 SELECT key, count(1) GROUP BY key

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/groupby_sort_skew_1_23.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/groupby_sort_skew_1_23.q.out b/ql/src/test/results/clientpositive/spark/groupby_sort_skew_1_23.q.out
index 4284726..4b11eb4 100644
--- a/ql/src/test/results/clientpositive/spark/groupby_sort_skew_1_23.q.out
+++ b/ql/src/test/results/clientpositive/spark/groupby_sort_skew_1_23.q.out
@@ -189,7 +189,8 @@ STAGE PLANS:
               name: default.outputtbl1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
 PREHOOK: query: INSERT OVERWRITE TABLE outputTbl1
@@ -419,7 +420,8 @@ STAGE PLANS:
               name: default.outputtbl2
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
 PREHOOK: query: INSERT OVERWRITE TABLE outputTbl2
@@ -604,7 +606,8 @@ STAGE PLANS:
               name: default.outputtbl1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
 PREHOOK: query: INSERT OVERWRITE TABLE outputTbl1
@@ -787,7 +790,8 @@ STAGE PLANS:
               name: default.outputtbl1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
 PREHOOK: query: INSERT OVERWRITE TABLE outputTbl1
@@ -978,7 +982,8 @@ STAGE PLANS:
               name: default.outputtbl3
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
 PREHOOK: query: INSERT OVERWRITE TABLE outputTbl3
@@ -1209,7 +1214,8 @@ STAGE PLANS:
               name: default.outputtbl4
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
 PREHOOK: query: INSERT OVERWRITE TABLE outputTbl4
@@ -1434,7 +1440,8 @@ STAGE PLANS:
               name: default.outputtbl3
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
 PREHOOK: query: INSERT OVERWRITE TABLE outputTbl3
@@ -1671,7 +1678,8 @@ STAGE PLANS:
               name: default.outputtbl1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
 PREHOOK: query: INSERT OVERWRITE TABLE outputTbl1
@@ -1973,7 +1981,8 @@ STAGE PLANS:
               name: default.outputtbl1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
 PREHOOK: query: INSERT OVERWRITE TABLE outputTbl1
@@ -2327,7 +2336,8 @@ STAGE PLANS:
               name: default.outputtbl1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
 PREHOOK: query: INSERT OVERWRITE TABLE outputTbl1
@@ -2633,7 +2643,8 @@ STAGE PLANS:
               name: default.outputtbl1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
 PREHOOK: query: INSERT OVERWRITE TABLE outputTbl1
@@ -3147,7 +3158,8 @@ STAGE PLANS:
               name: default.outputtbl1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
 PREHOOK: query: INSERT OVERWRITE TABLE outputTbl1
@@ -3330,7 +3342,8 @@ STAGE PLANS:
               name: default.outputtbl4
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
 PREHOOK: query: INSERT OVERWRITE TABLE outputTbl4
@@ -3524,7 +3537,8 @@ STAGE PLANS:
               name: default.outputtbl5
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
 PREHOOK: query: INSERT OVERWRITE TABLE outputTbl5
@@ -3717,7 +3731,8 @@ STAGE PLANS:
               name: default.outputtbl4
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
 PREHOOK: query: INSERT OVERWRITE TABLE outputTbl4
@@ -3917,7 +3932,8 @@ STAGE PLANS:
               name: default.outputtbl4
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
 PREHOOK: query: INSERT OVERWRITE TABLE outputTbl4
@@ -4089,7 +4105,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-1
     Move Operator
@@ -4102,7 +4119,8 @@ STAGE PLANS:
               name: default.dest2
 
   Stage: Stage-4
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM T2
 INSERT OVERWRITE TABLE DEST1 SELECT key, count(1) GROUP BY key
@@ -4268,7 +4286,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-1
     Move Operator
@@ -4281,7 +4300,8 @@ STAGE PLANS:
               name: default.dest2
 
   Stage: Stage-4
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM (select key, val from T2 where key = 8) x
 INSERT OVERWRITE TABLE DEST1 SELECT key, count(1) GROUP BY key

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/infer_bucket_sort_map_operators.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/infer_bucket_sort_map_operators.q.out b/ql/src/test/results/clientpositive/spark/infer_bucket_sort_map_operators.q.out
index 49eafbe..e0d5e58 100644
--- a/ql/src/test/results/clientpositive/spark/infer_bucket_sort_map_operators.q.out
+++ b/ql/src/test/results/clientpositive/spark/infer_bucket_sort_map_operators.q.out
@@ -103,7 +103,8 @@ STAGE PLANS:
               name: default.test_table_out
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: INSERT OVERWRITE TABLE test_table_out PARTITION (part = '1') 
 SELECT key, count(*) FROM test_table1 GROUP BY key
@@ -260,7 +261,8 @@ STAGE PLANS:
               name: default.test_table_out
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: INSERT OVERWRITE TABLE test_table_out PARTITION (part = '1') 
 SELECT a.key, a.value FROM (
@@ -381,7 +383,8 @@ STAGE PLANS:
               name: default.test_table_out
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: INSERT OVERWRITE TABLE test_table_out PARTITION (part = '1')
 SELECT /*+ MAPJOIN(a) */ a.key, b.value FROM test_table1 a JOIN test_table2 b ON a.key = b.key
@@ -516,7 +519,8 @@ STAGE PLANS:
               name: default.test_table_out
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: INSERT OVERWRITE TABLE test_table_out PARTITION (part = '1')
 SELECT /*+ MAPJOIN(a) */ b.value, count(*) FROM test_table1 a JOIN test_table2 b ON a.key = b.key

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/infer_bucket_sort_num_buckets.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/infer_bucket_sort_num_buckets.q.out b/ql/src/test/results/clientpositive/spark/infer_bucket_sort_num_buckets.q.out
index 8da2705..1bdf641 100644
--- a/ql/src/test/results/clientpositive/spark/infer_bucket_sort_num_buckets.q.out
+++ b/ql/src/test/results/clientpositive/spark/infer_bucket_sort_num_buckets.q.out
@@ -77,7 +77,8 @@ STAGE PLANS:
               name: default.test_table
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: INSERT OVERWRITE TABLE test_table PARTITION (ds = '2008-04-08', hr)
 SELECT key2, value, cast(hr as int) FROM

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/innerjoin.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/innerjoin.q.out b/ql/src/test/results/clientpositive/spark/innerjoin.q.out
index 9328b99..c02aa2a 100644
--- a/ql/src/test/results/clientpositive/spark/innerjoin.q.out
+++ b/ql/src/test/results/clientpositive/spark/innerjoin.q.out
@@ -95,7 +95,8 @@ STAGE PLANS:
               name: default.dest_j1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM src src1 INNER JOIN src src2 ON (src1.key = src2.key)
 INSERT OVERWRITE TABLE dest_j1 SELECT src1.key, src2.value

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/input12.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/input12.q.out b/ql/src/test/results/clientpositive/spark/input12.q.out
index efbaaf9..2fba863 100644
--- a/ql/src/test/results/clientpositive/spark/input12.q.out
+++ b/ql/src/test/results/clientpositive/spark/input12.q.out
@@ -110,7 +110,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-4
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-1
     Move Operator
@@ -123,7 +124,8 @@ STAGE PLANS:
               name: default.dest2
 
   Stage: Stage-5
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-2
     Move Operator
@@ -139,7 +141,8 @@ STAGE PLANS:
               name: default.dest3
 
   Stage: Stage-6
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM src 
 INSERT OVERWRITE TABLE dest1 SELECT src.* WHERE src.key < 100

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/input13.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/input13.q.out b/ql/src/test/results/clientpositive/spark/input13.q.out
index a72898f..dc5eec3 100644
--- a/ql/src/test/results/clientpositive/spark/input13.q.out
+++ b/ql/src/test/results/clientpositive/spark/input13.q.out
@@ -127,7 +127,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-5
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-1
     Move Operator
@@ -140,7 +141,8 @@ STAGE PLANS:
               name: default.dest2
 
   Stage: Stage-6
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-2
     Move Operator
@@ -156,7 +158,8 @@ STAGE PLANS:
               name: default.dest3
 
   Stage: Stage-7
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-3
     Move Operator

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/input14.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/input14.q.out b/ql/src/test/results/clientpositive/spark/input14.q.out
index 36f162e..ef2f37c 100644
--- a/ql/src/test/results/clientpositive/spark/input14.q.out
+++ b/ql/src/test/results/clientpositive/spark/input14.q.out
@@ -87,7 +87,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM (
   FROM src

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/input17.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/input17.q.out b/ql/src/test/results/clientpositive/spark/input17.q.out
index d95dbcb..5f207aa 100644
--- a/ql/src/test/results/clientpositive/spark/input17.q.out
+++ b/ql/src/test/results/clientpositive/spark/input17.q.out
@@ -84,7 +84,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM (
   FROM src_thrift

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/input18.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/input18.q.out b/ql/src/test/results/clientpositive/spark/input18.q.out
index 65850b2..ba77d15 100644
--- a/ql/src/test/results/clientpositive/spark/input18.q.out
+++ b/ql/src/test/results/clientpositive/spark/input18.q.out
@@ -87,7 +87,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM (
   FROM src

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/input1_limit.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/input1_limit.q.out b/ql/src/test/results/clientpositive/spark/input1_limit.q.out
index dd49287..cfc96cd 100644
--- a/ql/src/test/results/clientpositive/spark/input1_limit.q.out
+++ b/ql/src/test/results/clientpositive/spark/input1_limit.q.out
@@ -133,7 +133,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-1
     Move Operator
@@ -146,7 +147,8 @@ STAGE PLANS:
               name: default.dest2
 
   Stage: Stage-4
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM src
 INSERT OVERWRITE TABLE dest1 SELECT src.key, src.value WHERE src.key < 100 LIMIT 10

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/input_part2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/input_part2.q.out b/ql/src/test/results/clientpositive/spark/input_part2.q.out
index a728e97..12a0afb 100644
--- a/ql/src/test/results/clientpositive/spark/input_part2.q.out
+++ b/ql/src/test/results/clientpositive/spark/input_part2.q.out
@@ -254,7 +254,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
   Stage: Stage-1
@@ -286,7 +287,8 @@ STAGE PLANS:
               name: default.dest2
 
   Stage: Stage-4
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
 PREHOOK: query: FROM srcpart

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/insert1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/insert1.q.out b/ql/src/test/results/clientpositive/spark/insert1.q.out
index fa8757f..25f7289 100644
--- a/ql/src/test/results/clientpositive/spark/insert1.q.out
+++ b/ql/src/test/results/clientpositive/spark/insert1.q.out
@@ -70,7 +70,8 @@ STAGE PLANS:
               name: default.insert1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: explain insert into table INSERT1 select a.key, a.value from insert2 a WHERE (a.key=-1)
 PREHOOK: type: QUERY
@@ -118,7 +119,8 @@ STAGE PLANS:
               name: default.insert1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: create database x
 PREHOOK: type: CREATEDATABASE
@@ -180,7 +182,8 @@ STAGE PLANS:
               name: x.insert1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: explain insert into table default.INSERT1 select a.key, a.value from insert2 a WHERE (a.key=-1)
 PREHOOK: type: QUERY
@@ -228,7 +231,8 @@ STAGE PLANS:
               name: default.insert1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: explain
 from insert2
@@ -299,7 +303,8 @@ STAGE PLANS:
               name: default.insert1
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-1
     Move Operator
@@ -312,7 +317,8 @@ STAGE PLANS:
               name: x.insert1
 
   Stage: Stage-4
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: CREATE DATABASE db2
 PREHOOK: type: CREATEDATABASE

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/insert_into1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/insert_into1.q.out b/ql/src/test/results/clientpositive/spark/insert_into1.q.out
index dff389d..4a5c3b8 100644
--- a/ql/src/test/results/clientpositive/spark/insert_into1.q.out
+++ b/ql/src/test/results/clientpositive/spark/insert_into1.q.out
@@ -74,7 +74,8 @@ STAGE PLANS:
               name: default.insert_into1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: INSERT INTO TABLE insert_into1 SELECT * from src ORDER BY key LIMIT 100
 PREHOOK: type: QUERY
@@ -188,7 +189,8 @@ STAGE PLANS:
               name: default.insert_into1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: INSERT INTO TABLE insert_into1 SELECT * FROM src ORDER BY key LIMIT 100
 PREHOOK: type: QUERY
@@ -302,7 +304,8 @@ STAGE PLANS:
               name: default.insert_into1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: INSERT OVERWRITE TABLE insert_into1 SELECT * FROM src ORDER BY key LIMIT 10
 PREHOOK: type: QUERY
@@ -396,7 +399,8 @@ STAGE PLANS:
               name: default.insert_into1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: insert overwrite table insert_into1 select 1, 'a'
 PREHOOK: type: QUERY
@@ -452,7 +456,8 @@ STAGE PLANS:
               name: default.insert_into1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: insert into insert_into1 select 2, 'b'
 PREHOOK: type: QUERY

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/insert_into2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/insert_into2.q.out b/ql/src/test/results/clientpositive/spark/insert_into2.q.out
index 329387d..6724963 100644
--- a/ql/src/test/results/clientpositive/spark/insert_into2.q.out
+++ b/ql/src/test/results/clientpositive/spark/insert_into2.q.out
@@ -80,7 +80,8 @@ STAGE PLANS:
               name: default.insert_into2
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: INSERT INTO TABLE insert_into2 PARTITION (ds='1') SELECT * FROM src order by key limit 100
 PREHOOK: type: QUERY
@@ -235,7 +236,8 @@ STAGE PLANS:
               name: default.insert_into2
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: INSERT OVERWRITE TABLE insert_into2 PARTITION (ds='2')
   SELECT * FROM src order by key LIMIT 100
@@ -359,7 +361,8 @@ STAGE PLANS:
               name: default.insert_into2
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: INSERT OVERWRITE TABLE insert_into2 PARTITION (ds='2')
   SELECT * FROM src order by key LIMIT 50

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/insert_into3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/insert_into3.q.out b/ql/src/test/results/clientpositive/spark/insert_into3.q.out
index a6fac23..a75cb81 100644
--- a/ql/src/test/results/clientpositive/spark/insert_into3.q.out
+++ b/ql/src/test/results/clientpositive/spark/insert_into3.q.out
@@ -125,7 +125,8 @@ STAGE PLANS:
               name: default.insert_into3a
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-1
     Move Operator
@@ -138,7 +139,8 @@ STAGE PLANS:
               name: default.insert_into3b
 
   Stage: Stage-4
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM src INSERT INTO TABLE insert_into3a SELECT * ORDER BY key, value LIMIT 50
          INSERT INTO TABLE insert_into3b SELECT * ORDER BY key, value LIMIT 100
@@ -274,7 +276,8 @@ STAGE PLANS:
               name: default.insert_into3a
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-1
     Move Operator
@@ -287,7 +290,8 @@ STAGE PLANS:
               name: default.insert_into3b
 
   Stage: Stage-4
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM src INSERT OVERWRITE TABLE insert_into3a SELECT * LIMIT 10
          INSERT INTO TABLE insert_into3b SELECT * LIMIT 10

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/join1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/join1.q.out b/ql/src/test/results/clientpositive/spark/join1.q.out
index a0ee4ea..d89d3e5 100644
--- a/ql/src/test/results/clientpositive/spark/join1.q.out
+++ b/ql/src/test/results/clientpositive/spark/join1.q.out
@@ -95,7 +95,8 @@ STAGE PLANS:
               name: default.dest_j1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM src src1 JOIN src src2 ON (src1.key = src2.key)
 INSERT OVERWRITE TABLE dest_j1 SELECT src1.key, src2.value

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/join14.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/join14.q.out b/ql/src/test/results/clientpositive/spark/join14.q.out
index e804a1d..11edde0 100644
--- a/ql/src/test/results/clientpositive/spark/join14.q.out
+++ b/ql/src/test/results/clientpositive/spark/join14.q.out
@@ -95,7 +95,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM src JOIN srcpart ON src.key = srcpart.key AND srcpart.ds = '2008-04-08' and src.key > 100
 INSERT OVERWRITE TABLE dest1 SELECT src.key, srcpart.value

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/join17.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/join17.q.out b/ql/src/test/results/clientpositive/spark/join17.q.out
index 3644efd..e8619fe 100644
--- a/ql/src/test/results/clientpositive/spark/join17.q.out
+++ b/ql/src/test/results/clientpositive/spark/join17.q.out
@@ -251,7 +251,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
 PREHOOK: query: FROM src src1 JOIN src src2 ON (src1.key = src2.key)

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/join2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/join2.q.out b/ql/src/test/results/clientpositive/spark/join2.q.out
index f684beb..d2d6b1d 100644
--- a/ql/src/test/results/clientpositive/spark/join2.q.out
+++ b/ql/src/test/results/clientpositive/spark/join2.q.out
@@ -129,7 +129,8 @@ STAGE PLANS:
               name: default.dest_j2
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM src src1 JOIN src src2 ON (src1.key = src2.key) JOIN src src3 ON (src1.key + src2.key = src3.key)
 INSERT OVERWRITE TABLE dest_j2 SELECT src1.key, src3.value

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/join25.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/join25.q.out b/ql/src/test/results/clientpositive/spark/join25.q.out
index 05e5e70..0514466 100644
--- a/ql/src/test/results/clientpositive/spark/join25.q.out
+++ b/ql/src/test/results/clientpositive/spark/join25.q.out
@@ -98,7 +98,8 @@ STAGE PLANS:
               name: default.dest_j1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: INSERT OVERWRITE TABLE dest_j1 
 SELECT /*+ MAPJOIN(x) */ x.key, x.value, y.value

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/join26.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/join26.q.out b/ql/src/test/results/clientpositive/spark/join26.q.out
index ccd4526..e8186ab 100644
--- a/ql/src/test/results/clientpositive/spark/join26.q.out
+++ b/ql/src/test/results/clientpositive/spark/join26.q.out
@@ -329,7 +329,8 @@ STAGE PLANS:
               name: default.dest_j1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
 PREHOOK: query: INSERT OVERWRITE TABLE dest_j1

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/join27.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/join27.q.out b/ql/src/test/results/clientpositive/spark/join27.q.out
index e10d2fb..40a91e7 100644
--- a/ql/src/test/results/clientpositive/spark/join27.q.out
+++ b/ql/src/test/results/clientpositive/spark/join27.q.out
@@ -98,7 +98,8 @@ STAGE PLANS:
               name: default.dest_j1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: INSERT OVERWRITE TABLE dest_j1 
 SELECT /*+ MAPJOIN(x) */ x.key, x.value, y.value

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/join28.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/join28.q.out b/ql/src/test/results/clientpositive/spark/join28.q.out
index 8d4d870..4a3343a 100644
--- a/ql/src/test/results/clientpositive/spark/join28.q.out
+++ b/ql/src/test/results/clientpositive/spark/join28.q.out
@@ -136,7 +136,8 @@ STAGE PLANS:
               name: default.dest_j1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: INSERT OVERWRITE TABLE dest_j1 
 SELECT subq.key1, z.value

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/join29.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/join29.q.out b/ql/src/test/results/clientpositive/spark/join29.q.out
index 88929fe..8477a7e 100644
--- a/ql/src/test/results/clientpositive/spark/join29.q.out
+++ b/ql/src/test/results/clientpositive/spark/join29.q.out
@@ -136,7 +136,8 @@ STAGE PLANS:
               name: default.dest_j1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: INSERT OVERWRITE TABLE dest_j1 
 SELECT subq1.key, subq1.cnt, subq2.cnt

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/join3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/join3.q.out b/ql/src/test/results/clientpositive/spark/join3.q.out
index e50f091..5ee3c52 100644
--- a/ql/src/test/results/clientpositive/spark/join3.q.out
+++ b/ql/src/test/results/clientpositive/spark/join3.q.out
@@ -114,7 +114,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM src src1 JOIN src src2 ON (src1.key = src2.key) JOIN src src3 ON (src1.key = src3.key)
 INSERT OVERWRITE TABLE dest1 SELECT src1.key, src3.value

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/join30.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/join30.q.out b/ql/src/test/results/clientpositive/spark/join30.q.out
index 23650ff..5bfa57b 100644
--- a/ql/src/test/results/clientpositive/spark/join30.q.out
+++ b/ql/src/test/results/clientpositive/spark/join30.q.out
@@ -118,7 +118,8 @@ STAGE PLANS:
               name: default.dest_j1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: INSERT OVERWRITE TABLE dest_j1 
 SELECT /*+ MAPJOIN(x) */ x.key, count(1) FROM src1 x JOIN src y ON (x.key = y.key) group by x.key

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/join31.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/join31.q.out b/ql/src/test/results/clientpositive/spark/join31.q.out
index 4edb4ef..7eaaff9 100644
--- a/ql/src/test/results/clientpositive/spark/join31.q.out
+++ b/ql/src/test/results/clientpositive/spark/join31.q.out
@@ -143,7 +143,8 @@ STAGE PLANS:
               name: default.dest_j1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: INSERT OVERWRITE TABLE dest_j1 
 SELECT subq1.key, count(1) as cnt

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/join32.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/join32.q.out b/ql/src/test/results/clientpositive/spark/join32.q.out
index 3521f2b..80e8f35 100644
--- a/ql/src/test/results/clientpositive/spark/join32.q.out
+++ b/ql/src/test/results/clientpositive/spark/join32.q.out
@@ -335,7 +335,8 @@ STAGE PLANS:
               name: default.dest_j1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
 PREHOOK: query: INSERT OVERWRITE TABLE dest_j1

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/join32_lessSize.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/join32_lessSize.q.out b/ql/src/test/results/clientpositive/spark/join32_lessSize.q.out
index 2a6b9b6..d3e648f 100644
--- a/ql/src/test/results/clientpositive/spark/join32_lessSize.q.out
+++ b/ql/src/test/results/clientpositive/spark/join32_lessSize.q.out
@@ -343,7 +343,8 @@ STAGE PLANS:
               name: default.dest_j1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
 PREHOOK: query: INSERT OVERWRITE TABLE dest_j1
@@ -880,7 +881,8 @@ STAGE PLANS:
               name: default.dest_j1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
 PREHOOK: query: INSERT OVERWRITE TABLE dest_j1
@@ -1330,7 +1332,8 @@ STAGE PLANS:
               name: default.dest_j2
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
 PREHOOK: query: INSERT OVERWRITE TABLE dest_j2
@@ -1778,7 +1781,8 @@ STAGE PLANS:
               name: default.dest_j2
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
 PREHOOK: query: INSERT OVERWRITE TABLE dest_j2
@@ -2035,7 +2039,8 @@ STAGE PLANS:
               name: default.dest_j2
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: INSERT OVERWRITE TABLE dest_j2
 SELECT res.key, x.value, res.value  
@@ -2279,7 +2284,8 @@ STAGE PLANS:
               name: default.dest_j2
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: INSERT OVERWRITE TABLE dest_j2
 SELECT res.key, y.value, res.value

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/join33.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/join33.q.out b/ql/src/test/results/clientpositive/spark/join33.q.out
index 3521f2b..80e8f35 100644
--- a/ql/src/test/results/clientpositive/spark/join33.q.out
+++ b/ql/src/test/results/clientpositive/spark/join33.q.out
@@ -335,7 +335,8 @@ STAGE PLANS:
               name: default.dest_j1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
 PREHOOK: query: INSERT OVERWRITE TABLE dest_j1

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/join34.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/join34.q.out b/ql/src/test/results/clientpositive/spark/join34.q.out
index 503235f..2c9fb99 100644
--- a/ql/src/test/results/clientpositive/spark/join34.q.out
+++ b/ql/src/test/results/clientpositive/spark/join34.q.out
@@ -337,7 +337,8 @@ STAGE PLANS:
               name: default.dest_j1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
 PREHOOK: query: INSERT OVERWRITE TABLE dest_j1

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/join35.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/join35.q.out b/ql/src/test/results/clientpositive/spark/join35.q.out
index 54f68af..86ccc5a 100644
--- a/ql/src/test/results/clientpositive/spark/join35.q.out
+++ b/ql/src/test/results/clientpositive/spark/join35.q.out
@@ -379,7 +379,8 @@ STAGE PLANS:
               name: default.dest_j1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
 PREHOOK: query: INSERT OVERWRITE TABLE dest_j1

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/join36.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/join36.q.out b/ql/src/test/results/clientpositive/spark/join36.q.out
index b1717e0..b469fc3 100644
--- a/ql/src/test/results/clientpositive/spark/join36.q.out
+++ b/ql/src/test/results/clientpositive/spark/join36.q.out
@@ -138,7 +138,8 @@ STAGE PLANS:
               name: default.dest_j1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: INSERT OVERWRITE TABLE dest_j1 
 SELECT /*+ MAPJOIN(x) */ x.key, x.cnt, y.cnt

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/join37.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/join37.q.out b/ql/src/test/results/clientpositive/spark/join37.q.out
index 327e93e..8dc4078 100644
--- a/ql/src/test/results/clientpositive/spark/join37.q.out
+++ b/ql/src/test/results/clientpositive/spark/join37.q.out
@@ -98,7 +98,8 @@ STAGE PLANS:
               name: default.dest_j1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: INSERT OVERWRITE TABLE dest_j1 
 SELECT /*+ MAPJOIN(X) */ x.key, x.value, y.value

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/join39.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/join39.q.out b/ql/src/test/results/clientpositive/spark/join39.q.out
index 8f0ba62..c094dd9 100644
--- a/ql/src/test/results/clientpositive/spark/join39.q.out
+++ b/ql/src/test/results/clientpositive/spark/join39.q.out
@@ -91,7 +91,8 @@ STAGE PLANS:
               name: default.dest_j1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: INSERT OVERWRITE TABLE dest_j1
 SELECT /*+ MAPJOIN(y) */ x.key, x.value, y.key, y.value

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/join4.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/join4.q.out b/ql/src/test/results/clientpositive/spark/join4.q.out
index 7e1d473..44ec2e0 100644
--- a/ql/src/test/results/clientpositive/spark/join4.q.out
+++ b/ql/src/test/results/clientpositive/spark/join4.q.out
@@ -118,7 +118,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM (
  FROM 

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/join5.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/join5.q.out b/ql/src/test/results/clientpositive/spark/join5.q.out
index c354a0e..37a96d7 100644
--- a/ql/src/test/results/clientpositive/spark/join5.q.out
+++ b/ql/src/test/results/clientpositive/spark/join5.q.out
@@ -118,7 +118,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM (
  FROM 

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/join6.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/join6.q.out b/ql/src/test/results/clientpositive/spark/join6.q.out
index c488247..563c940 100644
--- a/ql/src/test/results/clientpositive/spark/join6.q.out
+++ b/ql/src/test/results/clientpositive/spark/join6.q.out
@@ -118,7 +118,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM (
  FROM 

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/join7.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/join7.q.out b/ql/src/test/results/clientpositive/spark/join7.q.out
index d9b7c05..86838cc 100644
--- a/ql/src/test/results/clientpositive/spark/join7.q.out
+++ b/ql/src/test/results/clientpositive/spark/join7.q.out
@@ -148,7 +148,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM (
  FROM 

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/join8.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/join8.q.out b/ql/src/test/results/clientpositive/spark/join8.q.out
index ae308b0..e6b7c1f 100644
--- a/ql/src/test/results/clientpositive/spark/join8.q.out
+++ b/ql/src/test/results/clientpositive/spark/join8.q.out
@@ -121,7 +121,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM (
  FROM 

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/join9.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/join9.q.out b/ql/src/test/results/clientpositive/spark/join9.q.out
index c7a191a..76534ad 100644
--- a/ql/src/test/results/clientpositive/spark/join9.q.out
+++ b/ql/src/test/results/clientpositive/spark/join9.q.out
@@ -252,7 +252,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
 PREHOOK: query: FROM srcpart src1 JOIN src src2 ON (src1.key = src2.key)

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/join_map_ppr.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/join_map_ppr.q.out b/ql/src/test/results/clientpositive/spark/join_map_ppr.q.out
index 86f3d9a..d2c674e 100644
--- a/ql/src/test/results/clientpositive/spark/join_map_ppr.q.out
+++ b/ql/src/test/results/clientpositive/spark/join_map_ppr.q.out
@@ -319,7 +319,8 @@ STAGE PLANS:
               name: default.dest_j1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
 PREHOOK: query: INSERT OVERWRITE TABLE dest_j1
@@ -811,7 +812,8 @@ STAGE PLANS:
               name: default.dest_j1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
 PREHOOK: query: INSERT OVERWRITE TABLE dest_j1

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/list_bucket_dml_10.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/list_bucket_dml_10.q.out b/ql/src/test/results/clientpositive/spark/list_bucket_dml_10.q.out
index de3af40..8636307 100644
--- a/ql/src/test/results/clientpositive/spark/list_bucket_dml_10.q.out
+++ b/ql/src/test/results/clientpositive/spark/list_bucket_dml_10.q.out
@@ -153,7 +153,8 @@ STAGE PLANS:
               name: default.list_bucketing_static_part
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
 PREHOOK: query: insert overwrite table list_bucketing_static_part partition (ds = '2008-04-08', hr = '11')

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/list_bucket_dml_2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/list_bucket_dml_2.q.out b/ql/src/test/results/clientpositive/spark/list_bucket_dml_2.q.out
index 6427f70..10a3892 100644
--- a/ql/src/test/results/clientpositive/spark/list_bucket_dml_2.q.out
+++ b/ql/src/test/results/clientpositive/spark/list_bucket_dml_2.q.out
@@ -204,7 +204,8 @@ STAGE PLANS:
               name: default.list_bucketing_static_part
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
 PREHOOK: query: insert overwrite table list_bucketing_static_part partition (ds = '2008-04-08', hr = '11')

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/load_dyn_part1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/load_dyn_part1.q.out b/ql/src/test/results/clientpositive/spark/load_dyn_part1.q.out
index 99ebfbd..9c62fb3 100644
--- a/ql/src/test/results/clientpositive/spark/load_dyn_part1.q.out
+++ b/ql/src/test/results/clientpositive/spark/load_dyn_part1.q.out
@@ -113,7 +113,8 @@ STAGE PLANS:
               name: default.nzhang_part1
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-1
     Move Operator
@@ -129,7 +130,8 @@ STAGE PLANS:
               name: default.nzhang_part2
 
   Stage: Stage-4
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: from srcpart
 insert overwrite table nzhang_part1 partition (ds, hr) select key, value, ds, hr where ds <= '2008-04-08'

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/load_dyn_part10.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/load_dyn_part10.q.out b/ql/src/test/results/clientpositive/spark/load_dyn_part10.q.out
index 24c76c2..ad4668e 100644
--- a/ql/src/test/results/clientpositive/spark/load_dyn_part10.q.out
+++ b/ql/src/test/results/clientpositive/spark/load_dyn_part10.q.out
@@ -83,7 +83,8 @@ STAGE PLANS:
               name: default.nzhang_part10
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: from srcpart
 insert overwrite table nzhang_part10 partition(ds='2008-12-31', hr) select key, value, hr where ds > '2008-04-08'

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/load_dyn_part13.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/load_dyn_part13.q.out b/ql/src/test/results/clientpositive/spark/load_dyn_part13.q.out
index 4318004..5c7e63c 100644
--- a/ql/src/test/results/clientpositive/spark/load_dyn_part13.q.out
+++ b/ql/src/test/results/clientpositive/spark/load_dyn_part13.q.out
@@ -120,7 +120,8 @@ STAGE PLANS:
               name: default.nzhang_part13
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: insert overwrite table nzhang_part13 partition (ds="2010-03-03", hr) 
 select * from (

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/load_dyn_part14.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/load_dyn_part14.q.out b/ql/src/test/results/clientpositive/spark/load_dyn_part14.q.out
index 7a562e4..08d6350 100644
--- a/ql/src/test/results/clientpositive/spark/load_dyn_part14.q.out
+++ b/ql/src/test/results/clientpositive/spark/load_dyn_part14.q.out
@@ -135,7 +135,8 @@ STAGE PLANS:
               name: default.nzhang_part14
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: insert overwrite table nzhang_part14 partition(value) 
 select key, value from (

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/load_dyn_part2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/load_dyn_part2.q.out b/ql/src/test/results/clientpositive/spark/load_dyn_part2.q.out
index 9bdd77b..6a7bb4d 100644
--- a/ql/src/test/results/clientpositive/spark/load_dyn_part2.q.out
+++ b/ql/src/test/results/clientpositive/spark/load_dyn_part2.q.out
@@ -88,7 +88,8 @@ STAGE PLANS:
               name: default.nzhang_part_bucket
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: insert overwrite table nzhang_part_bucket partition (ds='2010-03-23', hr) select key, value, hr from srcpart where ds is not null and hr is not null
 PREHOOK: type: QUERY

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/load_dyn_part3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/load_dyn_part3.q.out b/ql/src/test/results/clientpositive/spark/load_dyn_part3.q.out
index 0d4b21a..ca6729a 100644
--- a/ql/src/test/results/clientpositive/spark/load_dyn_part3.q.out
+++ b/ql/src/test/results/clientpositive/spark/load_dyn_part3.q.out
@@ -81,7 +81,8 @@ STAGE PLANS:
               name: default.nzhang_part3
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: insert overwrite table nzhang_part3 partition (ds, hr) select key, value, ds, hr from srcpart where ds is not null and hr is not null
 PREHOOK: type: QUERY

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/load_dyn_part4.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/load_dyn_part4.q.out b/ql/src/test/results/clientpositive/spark/load_dyn_part4.q.out
index 937b9d0..368c08a 100644
--- a/ql/src/test/results/clientpositive/spark/load_dyn_part4.q.out
+++ b/ql/src/test/results/clientpositive/spark/load_dyn_part4.q.out
@@ -91,7 +91,8 @@ STAGE PLANS:
               name: default.nzhang_part4
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: insert overwrite table nzhang_part4 partition (ds, hr) select key, value, ds, hr from srcpart where ds is not null and hr is not null
 PREHOOK: type: QUERY


[02/22] hive git commit: HIVE-16827 : Merge stats task and column stats task into a single task (Zoltan Haindrich via Ashutosh Chauhan)

Posted by ha...@apache.org.
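The diff below, opening part [02/22], goes beyond the plan-text rename to a golden file whose recorded behavior changes: temp_table_display_colstats_tbllvl.q.out now snapshots "desc formatted" output before and after the LOAD, capturing the table parameters (COLUMN_STATS_ACCURATE, numFiles, numRows, rawDataSize, totalSize), and the row estimate in the subsequent ANALYZE plan drops from 65 to 1, presumably because the estimate is now taken from the recorded basic stats rather than derived from file size. The plan's compute_stats(col, 'hll') aggregations are Hive's HyperLogLog-based column statistics; they correspond to a statement along these lines (the column list is inferred from the plan, so treat it as an assumption):

-- NDV per column is estimated with HyperLogLog ('hll').
ANALYZE TABLE UserVisits_web_text_none
  COMPUTE STATISTICS FOR COLUMNS sourceIP, avgTimeOnSite, adRevenue;
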
http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/temp_table_display_colstats_tbllvl.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/temp_table_display_colstats_tbllvl.q.out b/ql/src/test/results/clientpositive/temp_table_display_colstats_tbllvl.q.out
index 1fb4842..a202e45 100644
--- a/ql/src/test/results/clientpositive/temp_table_display_colstats_tbllvl.q.out
+++ b/ql/src/test/results/clientpositive/temp_table_display_colstats_tbllvl.q.out
@@ -34,6 +34,48 @@ POSTHOOK: type: CREATETABLE
 #### A masked pattern was here ####
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@UserVisits_web_text_none
+PREHOOK: query: desc formatted UserVisits_web_text_none
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@uservisits_web_text_none
+POSTHOOK: query: desc formatted UserVisits_web_text_none
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@uservisits_web_text_none
+# col_name            	data_type           	comment             
+sourceip            	string              	                    
+desturl             	string              	                    
+visitdate           	string              	                    
+adrevenue           	float               	                    
+useragent           	string              	                    
+ccode               	string              	                    
+lcode               	string              	                    
+skeyword            	string              	                    
+avgtimeonsite       	int                 	                    
+	 	 
+# Detailed Table Information	 	 
+Database:           	default             	 
+#### A masked pattern was here ####
+Retention:          	0                   	 
+#### A masked pattern was here ####
+Table Type:         	EXTERNAL_TABLE      	 
+Table Parameters:	 	 
+	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"adrevenue\":\"true\",\"avgtimeonsite\":\"true\",\"ccode\":\"true\",\"desturl\":\"true\",\"lcode\":\"true\",\"skeyword\":\"true\",\"sourceip\":\"true\",\"useragent\":\"true\",\"visitdate\":\"true\"}}
+	EXTERNAL            	TRUE                
+	numFiles            	0                   
+	numRows             	0                   
+	rawDataSize         	0                   
+	totalSize           	0                   
+	 	 
+# Storage Information	 	 
+SerDe Library:      	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe	 
+InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
+OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
+Compressed:         	No                  	 
+Num Buckets:        	-1                  	 
+Bucket Columns:     	[]                  	 
+Sort Columns:       	[]                  	 
+Storage Desc Params:	 	 
+	field.delim         	|                   
+	serialization.format	|                   
 PREHOOK: query: LOAD DATA LOCAL INPATH "../../data/files/UserVisits.dat" INTO TABLE UserVisits_web_text_none
 PREHOOK: type: LOAD
 #### A masked pattern was here ####
@@ -42,6 +84,47 @@ POSTHOOK: query: LOAD DATA LOCAL INPATH "../../data/files/UserVisits.dat" INTO T
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
 POSTHOOK: Output: default@uservisits_web_text_none
+PREHOOK: query: desc formatted UserVisits_web_text_none
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@uservisits_web_text_none
+POSTHOOK: query: desc formatted UserVisits_web_text_none
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@uservisits_web_text_none
+# col_name            	data_type           	comment             
+sourceip            	string              	                    
+desturl             	string              	                    
+visitdate           	string              	                    
+adrevenue           	float               	                    
+useragent           	string              	                    
+ccode               	string              	                    
+lcode               	string              	                    
+skeyword            	string              	                    
+avgtimeonsite       	int                 	                    
+	 	 
+# Detailed Table Information	 	 
+Database:           	default             	 
+#### A masked pattern was here ####
+Retention:          	0                   	 
+#### A masked pattern was here ####
+Table Type:         	EXTERNAL_TABLE      	 
+Table Parameters:	 	 
+	EXTERNAL            	TRUE                
+	numFiles            	1                   
+	numRows             	0                   
+	rawDataSize         	0                   
+	totalSize           	7060                
+	 	 
+# Storage Information	 	 
+SerDe Library:      	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe	 
+InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
+OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
+Compressed:         	No                  	 
+Num Buckets:        	-1                  	 
+Bucket Columns:     	[]                  	 
+Sort Columns:       	[]                  	 
+Storage Desc Params:	 	 
+	field.delim         	|                   
+	serialization.format	|                   
 PREHOOK: query: desc extended UserVisits_web_text_none sourceIP
 PREHOOK: type: DESCTABLE
 PREHOOK: Input: default@uservisits_web_text_none
@@ -83,11 +166,11 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: uservisits_web_text_none
-            Statistics: Num rows: 65 Data size: 7060 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 1 Data size: 7060 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: sourceip (type: string), adrevenue (type: float), avgtimeonsite (type: int)
               outputColumnNames: sourceip, adrevenue, avgtimeonsite
-              Statistics: Num rows: 65 Data size: 7060 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 1 Data size: 7060 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
                 aggregations: compute_stats(sourceip, 'hll'), compute_stats(avgtimeonsite, 'hll'), compute_stats(adrevenue, 'hll')
                 mode: hash
@@ -112,7 +195,8 @@ STAGE PLANS:
                 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
 
   Stage: Stage-1
-    Column Stats Work
+    Stats Work
+      Basic Stats Work:
       Column Stats Desc:
           Columns: sourceIP, avgTimeOnSite, adRevenue
           Column Types: string, int, float
@@ -134,12 +218,13 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: uservisits_web_text_none
-            Statistics: Num rows: 65 Data size: 7060 Basic stats: COMPLETE Column stats: NONE
-            GatherStats: false
+            Statistics: Num rows: 1 Data size: 7060 Basic stats: COMPLETE Column stats: NONE
+            Statistics Aggregation Key Prefix: default.uservisits_web_text_none/
+            GatherStats: true
             Select Operator
               expressions: sourceip (type: string), adrevenue (type: float), avgtimeonsite (type: int)
               outputColumnNames: sourceip, adrevenue, avgtimeonsite
-              Statistics: Num rows: 65 Data size: 7060 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 1 Data size: 7060 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
                 aggregations: compute_stats(sourceip, 'hll'), compute_stats(avgtimeonsite, 'hll'), compute_stats(adrevenue, 'hll')
                 mode: hash
@@ -171,6 +256,8 @@ STAGE PLANS:
 #### A masked pattern was here ####
               name default.uservisits_web_text_none
               numFiles 1
+              numRows 0
+              rawDataSize 0
               serialization.ddl struct uservisits_web_text_none { string sourceip, string desturl, string visitdate, float adrevenue, string useragent, string ccode, string lcode, string skeyword, i32 avgtimeonsite}
               serialization.format |
               serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -190,6 +277,8 @@ STAGE PLANS:
 #### A masked pattern was here ####
                 name default.uservisits_web_text_none
                 numFiles 1
+                numRows 0
+                rawDataSize 0
                 serialization.ddl struct uservisits_web_text_none { string sourceip, string desturl, string visitdate, float adrevenue, string useragent, string ccode, string lcode, string skeyword, i32 avgtimeonsite}
                 serialization.format |
                 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -230,7 +319,9 @@ STAGE PLANS:
             MultiFileSpray: false
 
   Stage: Stage-1
-    Column Stats Work
+    Stats Work
+      Basic Stats Work:
+          Stats Aggregation Key Prefix: default.uservisits_web_text_none/
       Column Stats Desc:
           Columns: sourceIP, avgTimeOnSite, adRevenue
           Column Types: string, int, float
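
Note on the two hunks above: with basic and column stats computed by a single
task, the extended plan enables stats gathering on the TableScan
(GatherStats: true) and records a Statistics Aggregation Key Prefix
(default.uservisits_web_text_none/) under which the tasks publish their
counters; the one "Stats Work" stage then carries both the "Basic Stats Work:"
section and the "Column Stats Desc:" that previously lived in a separate
"Column Stats Work" stage. The statement behind this plan is presumably the
EXTENDED variant of the preceding EXPLAIN:

  EXPLAIN EXTENDED
  ANALYZE TABLE UserVisits_web_text_none
  COMPUTE STATISTICS FOR COLUMNS sourceIP, avgTimeOnSite, adRevenue;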
@@ -240,11 +331,55 @@ STAGE PLANS:
 PREHOOK: query: analyze table UserVisits_web_text_none compute statistics for columns sourceIP, avgTimeOnSite, adRevenue
 PREHOOK: type: QUERY
 PREHOOK: Input: default@uservisits_web_text_none
+PREHOOK: Output: default@uservisits_web_text_none
 #### A masked pattern was here ####
 POSTHOOK: query: analyze table UserVisits_web_text_none compute statistics for columns sourceIP, avgTimeOnSite, adRevenue
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@uservisits_web_text_none
+POSTHOOK: Output: default@uservisits_web_text_none
+#### A masked pattern was here ####
+PREHOOK: query: desc formatted UserVisits_web_text_none
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@uservisits_web_text_none
+POSTHOOK: query: desc formatted UserVisits_web_text_none
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@uservisits_web_text_none
+# col_name            	data_type           	comment             
+sourceip            	string              	                    
+desturl             	string              	                    
+visitdate           	string              	                    
+adrevenue           	float               	                    
+useragent           	string              	                    
+ccode               	string              	                    
+lcode               	string              	                    
+skeyword            	string              	                    
+avgtimeonsite       	int                 	                    
+	 	 
+# Detailed Table Information	 	 
+Database:           	default             	 
+#### A masked pattern was here ####
+Retention:          	0                   	 
 #### A masked pattern was here ####
+Table Type:         	EXTERNAL_TABLE      	 
+Table Parameters:	 	 
+	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"adRevenue\":\"true\",\"avgTimeOnSite\":\"true\",\"sourceIP\":\"true\"}}
+	EXTERNAL            	TRUE                
+	numFiles            	1                   
+	numRows             	55                  
+	rawDataSize         	7005                
+	totalSize           	7060                
+	 	 
+# Storage Information	 	 
+SerDe Library:      	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe	 
+InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
+OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
+Compressed:         	No                  	 
+Num Buckets:        	-1                  	 
+Bucket Columns:     	[]                  	 
+Sort Columns:       	[]                  	 
+Storage Desc Params:	 	 
+	field.delim         	|                   
+	serialization.format	|                   
 PREHOOK: query: desc formatted UserVisits_web_text_none sourceIP
 PREHOOK: type: DESCTABLE
 PREHOOK: Input: default@uservisits_web_text_none
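
Note on the desc formatted output added above: it traces the lifecycle of the
COLUMN_STATS_ACCURATE table parameter. Right after CREATE the empty table is
trivially accurate for every column; the file-level LOAD drops the parameter
(numRows stays 0 while numFiles and totalSize change); the column ANALYZE
restores it for exactly the analyzed columns and, since the merged task
computes basic stats as well, fills in numRows 55 and rawDataSize 7005. That
is also why the table now appears as a write entity (PREHOOK/POSTHOOK:
Output) for the ANALYZE. To observe the same sequence by hand:

  ANALYZE TABLE UserVisits_web_text_none
  COMPUTE STATISTICS FOR COLUMNS sourceIP, avgTimeOnSite, adRevenue;
  DESC FORMATTED UserVisits_web_text_none;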
@@ -263,6 +398,7 @@ num_trues
 num_falses          	                    	 	 	 	 	 	 	 	 	 	 
 bitVector           	HL                  	 	 	 	 	 	 	 	 	 	 
 comment             	from deserializer   	 	 	 	 	 	 	 	 	 	 
+COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"adRevenue\":\"true\",\"avgTimeOnSite\":\"true\",\"sourceIP\":\"true\"}}	 	 	 	 	 	 	 	 	 	 
 PREHOOK: query: desc formatted UserVisits_web_text_none avgTimeOnSite
 PREHOOK: type: DESCTABLE
 PREHOOK: Input: default@uservisits_web_text_none
@@ -281,6 +417,7 @@ num_trues
 num_falses          	                    	 	 	 	 	 	 	 	 	 	 
 bitVector           	HL                  	 	 	 	 	 	 	 	 	 	 
 comment             	from deserializer   	 	 	 	 	 	 	 	 	 	 
+COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"adRevenue\":\"true\",\"avgTimeOnSite\":\"true\",\"sourceIP\":\"true\"}}	 	 	 	 	 	 	 	 	 	 
 PREHOOK: query: desc formatted UserVisits_web_text_none adRevenue
 PREHOOK: type: DESCTABLE
 PREHOOK: Input: default@uservisits_web_text_none
@@ -299,6 +436,7 @@ num_trues
 num_falses          	                    	 	 	 	 	 	 	 	 	 	 
 bitVector           	HL                  	 	 	 	 	 	 	 	 	 	 
 comment             	from deserializer   	 	 	 	 	 	 	 	 	 	 
+COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"adRevenue\":\"true\",\"avgTimeOnSite\":\"true\",\"sourceIP\":\"true\"}}	 	 	 	 	 	 	 	 	 	 
 PREHOOK: query: CREATE TEMPORARY TABLE empty_tab(
    a int,
    b double,
@@ -383,7 +521,8 @@ STAGE PLANS:
                 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
 
   Stage: Stage-1
-    Column Stats Work
+    Stats Work
+      Basic Stats Work:
       Column Stats Desc:
           Columns: a, b, c, d, e
           Column Types: int, double, string, boolean, binary
@@ -392,10 +531,12 @@ STAGE PLANS:
 PREHOOK: query: analyze table empty_tab compute statistics for columns a,b,c,d,e
 PREHOOK: type: QUERY
 PREHOOK: Input: default@empty_tab
+PREHOOK: Output: default@empty_tab
 #### A masked pattern was here ####
 POSTHOOK: query: analyze table empty_tab compute statistics for columns a,b,c,d,e
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@empty_tab
+POSTHOOK: Output: default@empty_tab
 #### A masked pattern was here ####
 PREHOOK: query: desc formatted empty_tab a
 PREHOOK: type: DESCTABLE
@@ -504,6 +645,7 @@ POSTHOOK: query: desc extended default.UserVisits_web_text_none sourceIP
 POSTHOOK: type: DESCTABLE
 POSTHOOK: Input: default@uservisits_web_text_none
 sourceIP            	string              	from deserializer   
+COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"adRevenue\":\"true\",\"avgTimeOnSite\":\"true\",\"sourceIP\":\"true\"}}	 
 PREHOOK: query: desc formatted UserVisits_web_text_none sourceIP
 PREHOOK: type: DESCTABLE
 PREHOOK: Input: test@uservisits_web_text_none
@@ -558,14 +700,17 @@ num_trues
 num_falses          	                    	 	 	 	 	 	 	 	 	 	 
 bitVector           	HL                  	 	 	 	 	 	 	 	 	 	 
 comment             	from deserializer   	 	 	 	 	 	 	 	 	 	 
+COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"adRevenue\":\"true\",\"avgTimeOnSite\":\"true\",\"sourceIP\":\"true\"}}	 	 	 	 	 	 	 	 	 	 
 PREHOOK: query: analyze table UserVisits_web_text_none compute statistics for columns sKeyword
 PREHOOK: type: QUERY
 PREHOOK: Input: test@uservisits_web_text_none
 #### A masked pattern was here ####
+PREHOOK: Output: test@uservisits_web_text_none
 POSTHOOK: query: analyze table UserVisits_web_text_none compute statistics for columns sKeyword
 POSTHOOK: type: QUERY
 POSTHOOK: Input: test@uservisits_web_text_none
 #### A masked pattern was here ####
+POSTHOOK: Output: test@uservisits_web_text_none
 PREHOOK: query: desc extended UserVisits_web_text_none sKeyword
 PREHOOK: type: DESCTABLE
 PREHOOK: Input: test@uservisits_web_text_none
@@ -573,6 +718,7 @@ POSTHOOK: query: desc extended UserVisits_web_text_none sKeyword
 POSTHOOK: type: DESCTABLE
 POSTHOOK: Input: test@uservisits_web_text_none
 sKeyword            	string              	from deserializer   
+COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"sKeyword\":\"true\"}}	 
 PREHOOK: query: desc formatted UserVisits_web_text_none sKeyword
 PREHOOK: type: DESCTABLE
 PREHOOK: Input: test@uservisits_web_text_none
@@ -591,6 +737,7 @@ num_trues
 num_falses          	                    	 	 	 	 	 	 	 	 	 	 
 bitVector           	HL                  	 	 	 	 	 	 	 	 	 	 
 comment             	from deserializer   	 	 	 	 	 	 	 	 	 	 
+COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"sKeyword\":\"true\"}}	 	 	 	 	 	 	 	 	 	 
 PREHOOK: query: desc formatted test.UserVisits_web_text_none sKeyword
 PREHOOK: type: DESCTABLE
 PREHOOK: Input: test@uservisits_web_text_none
@@ -609,3 +756,4 @@ num_trues
 num_falses          	                    	 	 	 	 	 	 	 	 	 	 
 bitVector           	HL                  	 	 	 	 	 	 	 	 	 	 
 comment             	from deserializer   	 	 	 	 	 	 	 	 	 	 
+COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"sKeyword\":\"true\"}}	 	 	 	 	 	 	 	 	 	 

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/tez/acid_vectorization_original_tez.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/acid_vectorization_original_tez.q.out b/ql/src/test/results/clientpositive/tez/acid_vectorization_original_tez.q.out
index b8ab775..f10af8c 100644
--- a/ql/src/test/results/clientpositive/tez/acid_vectorization_original_tez.q.out
+++ b/ql/src/test/results/clientpositive/tez/acid_vectorization_original_tez.q.out
@@ -615,7 +615,8 @@ STAGE PLANS:
           Write Type: UPDATE
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: update over10k_orc_bucketed set i = 0 where b = 4294967363 and t < 100
 PREHOOK: type: QUERY

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/tez/explainanalyze_1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/explainanalyze_1.q.out b/ql/src/test/results/clientpositive/tez/explainanalyze_1.q.out
index 59437b8..a02a57c 100644
--- a/ql/src/test/results/clientpositive/tez/explainanalyze_1.q.out
+++ b/ql/src/test/results/clientpositive/tez/explainanalyze_1.q.out
@@ -77,7 +77,7 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 Plan optimized by CBO.
 
 Stage-3
-  Stats-Aggr Operator
+  Stats Work{}
     Stage-4
       Create Table Operator:
         name:default.t
@@ -121,7 +121,7 @@ POSTHOOK: type: QUERY
 Plan optimized by CBO.
 
 Stage-3
-  Stats-Aggr Operator
+  Stats Work{}
     Stage-0
       Move Operator
         table:{"name:":"default.t"}

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/tez/explainanalyze_2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/explainanalyze_2.q.out b/ql/src/test/results/clientpositive/tez/explainanalyze_2.q.out
index 546ae60..e5b1d74 100644
--- a/ql/src/test/results/clientpositive/tez/explainanalyze_2.q.out
+++ b/ql/src/test/results/clientpositive/tez/explainanalyze_2.q.out
@@ -1129,7 +1129,7 @@ Reducer 8 <- Map 18 (SIMPLE_EDGE), Map 7 (SIMPLE_EDGE)
 Reducer 9 <- Reducer 8 (SIMPLE_EDGE), Union 20 (SIMPLE_EDGE), Union 5 (CONTAINS)
 
 Stage-5
-  Stats-Aggr Operator
+  Stats Work{}
     Stage-0
       Move Operator
         table:{"name:":"default.a"}
@@ -1324,13 +1324,13 @@ Stage-5
                   table:{"name:":"default.c"}
                    Please refer to the previous Select Operator [SEL_72]
 Stage-6
-  Stats-Aggr Operator
+  Stats Work{}
     Stage-1
       Move Operator
         table:{"name:":"default.b"}
          Please refer to the previous Stage-4
 Stage-7
-  Stats-Aggr Operator
+  Stats Work{}
     Stage-2
       Move Operator
         table:{"name:":"default.c"}
@@ -1436,7 +1436,7 @@ Reducer 7 <- Union 6 (SIMPLE_EDGE), Union 8 (CONTAINS)
 Reducer 9 <- Union 8 (SIMPLE_EDGE)
 
 Stage-5
-  Stats-Aggr Operator
+  Stats Work{}
     Stage-0
       Move Operator
         table:{"name:":"default.a"}
@@ -1675,13 +1675,13 @@ Stage-5
                 table:{"name:":"default.c"}
                  Please refer to the previous Group By Operator [GBY_112]
 Stage-6
-  Stats-Aggr Operator
+  Stats Work{}
     Stage-1
       Move Operator
         table:{"name:":"default.b"}
          Please refer to the previous Stage-4
 Stage-7
-  Stats-Aggr Operator
+  Stats Work{}
     Stage-2
       Move Operator
         table:{"name:":"default.c"}
@@ -1744,7 +1744,7 @@ Reducer 4 <- Union 3 (SIMPLE_EDGE)
 Reducer 5 <- Reducer 4 (SIMPLE_EDGE)
 
 Stage-4
-  Stats-Aggr Operator
+  Stats Work{}
     Stage-0
       Move Operator
         table:{"name:":"default.dest1"}
@@ -1791,7 +1791,7 @@ Stage-4
                           Output:["_col0","_col1","_col2"],aggregations:["count(DISTINCT substr(_col1, 5))"],keys:_col0, _col1
                            Please refer to the previous Group By Operator [GBY_11]
 Stage-5
-  Stats-Aggr Operator
+  Stats Work{}
     Stage-1
       Move Operator
         table:{"name:":"default.dest2"}
@@ -1976,7 +1976,7 @@ Reducer 4 <- Union 3 (SIMPLE_EDGE)
 Reducer 5 <- Union 3 (SIMPLE_EDGE)
 
 Stage-4
-  Stats-Aggr Operator
+  Stats Work{}
     Stage-0
       Move Operator
         table:{"name:":"default.dest1"}
@@ -2035,7 +2035,7 @@ Stage-4
                     Output:["_col0","_col1","_col2"],aggregations:["count(DISTINCT KEY._col2:0._col0)"],keys:KEY._col0, KEY._col1
                   <- Please refer to the previous Union 3 [SIMPLE_EDGE]
 Stage-5
-  Stats-Aggr Operator
+  Stats Work{}
     Stage-1
       Move Operator
         table:{"name:":"default.dest2"}
@@ -2094,7 +2094,7 @@ Reducer 4 <- Union 3 (SIMPLE_EDGE)
 Reducer 5 <- Union 3 (SIMPLE_EDGE)
 
 Stage-4
-  Stats-Aggr Operator
+  Stats Work{}
     Stage-0
       Move Operator
         table:{"name:":"default.dest1"}
@@ -2143,7 +2143,7 @@ Stage-4
                     Output:["_col0","_col1","_col2"],aggregations:["count(DISTINCT KEY._col2:0._col0)"],keys:KEY._col0, KEY._col1
                   <- Please refer to the previous Union 3 [SIMPLE_EDGE]
 Stage-5
-  Stats-Aggr Operator
+  Stats Work{}
     Stage-1
       Move Operator
         table:{"name:":"default.dest2"}

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/tez/explainanalyze_3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/explainanalyze_3.q.out b/ql/src/test/results/clientpositive/tez/explainanalyze_3.q.out
index 8ea8ac7..f718a8c 100644
--- a/ql/src/test/results/clientpositive/tez/explainanalyze_3.q.out
+++ b/ql/src/test/results/clientpositive/tez/explainanalyze_3.q.out
@@ -225,7 +225,7 @@ PREHOOK: type: QUERY
 POSTHOOK: query: explain analyze analyze table src_stats compute statistics
 POSTHOOK: type: QUERY
 Stage-2
-  Stats-Aggr Operator
+  Stats Work{}
     Stage-0
       Map 1
       TableScan [TS_0] (rows=500/500 width=10)
@@ -248,21 +248,19 @@ POSTHOOK: type: QUERY
 Vertex dependency in root stage
 Reducer 2 <- Map 1 (CUSTOM_SIMPLE_EDGE)
 
-Stage-3
-  Column Stats Work{}
-    Stage-2
-      Stats-Aggr Operator
-        Stage-0
-          Reducer 2
-          File Output Operator [FS_5]
-            Group By Operator [GBY_3] (rows=1/1 width=1248)
-              Output:["_col0","_col1"],aggregations:["compute_stats(VALUE._col0, 'hll')","compute_stats(VALUE._col2, 'hll')"]
-            <-Map 1 [CUSTOM_SIMPLE_EDGE]
-              PARTITION_ONLY_SHUFFLE [RS_2]
-                Select Operator [SEL_1] (rows=500/500 width=350)
-                  Output:["key","value"]
-                  TableScan [TS_0] (rows=500/500 width=350)
-                    default@src_stats,src_stats,Tbl:COMPLETE,Col:NONE,Output:["key","value"]
+Stage-2
+  Stats Work{}
+    Stage-0
+      Reducer 2
+      File Output Operator [FS_5]
+        Group By Operator [GBY_3] (rows=1/1 width=1248)
+          Output:["_col0","_col1"],aggregations:["compute_stats(VALUE._col0, 'hll')","compute_stats(VALUE._col2, 'hll')"]
+        <-Map 1 [CUSTOM_SIMPLE_EDGE]
+          PARTITION_ONLY_SHUFFLE [RS_2]
+            Select Operator [SEL_1] (rows=500/500 width=350)
+              Output:["key","value"]
+              TableScan [TS_0] (rows=500/500 width=350)
+                default@src_stats,src_stats,Tbl:COMPLETE,Col:NONE,Output:["key","value"]
 
 PREHOOK: query: CREATE TEMPORARY MACRO SIGMOID (x DOUBLE) 1.0 / (1.0 + EXP(-x))
 PREHOOK: type: CREATEMACRO
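
Note on the src_stats hunk above: a column-stats ANALYZE used to produce two
bookkeeping stages, an outer Column Stats Work{} wrapping a Stats-Aggr
Operator; these are now folded into a single Stats Work{} stage, so the plan
loses one nesting level and renumbers from Stage-3 to Stage-2. Following the
pattern of the preceding test, the statement is presumably:

  EXPLAIN ANALYZE
  ANALYZE TABLE src_stats COMPUTE STATISTICS FOR COLUMNS;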
@@ -342,7 +340,7 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 Plan optimized by CBO.
 
 Stage-3
-  Stats-Aggr Operator
+  Stats Work{}
     Stage-4
       Create Table Operator:
         name:default.src_autho_test
@@ -653,7 +651,7 @@ POSTHOOK: type: QUERY
 Plan optimized by CBO.
 
 Stage-3
-  Stats-Aggr Operator
+  Stats Work{}
     Stage-0
       Move Operator
         table:{"name:":"default.orc_merge5"}

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/tez/explainanalyze_5.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/explainanalyze_5.q.out b/ql/src/test/results/clientpositive/tez/explainanalyze_5.q.out
index 7cdc5c9..3558712 100644
--- a/ql/src/test/results/clientpositive/tez/explainanalyze_5.q.out
+++ b/ql/src/test/results/clientpositive/tez/explainanalyze_5.q.out
@@ -27,7 +27,7 @@ PREHOOK: type: QUERY
 POSTHOOK: query: explain analyze analyze table src_stats compute statistics
 POSTHOOK: type: QUERY
 Stage-2
-  Stats-Aggr Operator
+  Stats Work{}
     Stage-0
       Map 1
       TableScan [TS_0] (rows=500/500 width=10)
@@ -50,21 +50,19 @@ POSTHOOK: type: QUERY
 Vertex dependency in root stage
 Reducer 2 <- Map 1 (CUSTOM_SIMPLE_EDGE)
 
-Stage-3
-  Column Stats Work{}
-    Stage-2
-      Stats-Aggr Operator
-        Stage-0
-          Reducer 2
-          File Output Operator [FS_5]
-            Group By Operator [GBY_3] (rows=1/1 width=1248)
-              Output:["_col0","_col1"],aggregations:["compute_stats(VALUE._col0, 'hll')","compute_stats(VALUE._col2, 'hll')"]
-            <-Map 1 [CUSTOM_SIMPLE_EDGE]
-              PARTITION_ONLY_SHUFFLE [RS_2]
-                Select Operator [SEL_1] (rows=500/500 width=350)
-                  Output:["key","value"]
-                  TableScan [TS_0] (rows=500/500 width=350)
-                    default@src_stats,src_stats,Tbl:COMPLETE,Col:NONE,Output:["key","value"]
+Stage-2
+  Stats Work{}
+    Stage-0
+      Reducer 2
+      File Output Operator [FS_5]
+        Group By Operator [GBY_3] (rows=1/1 width=1248)
+          Output:["_col0","_col1"],aggregations:["compute_stats(VALUE._col0, 'hll')","compute_stats(VALUE._col2, 'hll')"]
+        <-Map 1 [CUSTOM_SIMPLE_EDGE]
+          PARTITION_ONLY_SHUFFLE [RS_2]
+            Select Operator [SEL_1] (rows=500/500 width=350)
+              Output:["key","value"]
+              TableScan [TS_0] (rows=500/500 width=350)
+                default@src_stats,src_stats,Tbl:COMPLETE,Col:NONE,Output:["key","value"]
 
 PREHOOK: query: drop table src_multi2
 PREHOOK: type: DROPTABLE
@@ -101,66 +99,64 @@ Reducer 3 <- Union 2 (SIMPLE_EDGE)
 Reducer 4 <- Map 7 (SIMPLE_EDGE), Reducer 3 (SIMPLE_EDGE)
 Reducer 5 <- Reducer 4 (CUSTOM_SIMPLE_EDGE)
 
-Stage-4
-  Column Stats Work{}
-    Stage-3
-      Stats-Aggr Operator
-        Stage-0
-          Move Operator
-            table:{"name:":"default.src_multi2"}
-            Stage-2
-              Dependency Collection{}
-                Stage-1
-                  Reducer 5
-                  File Output Operator [FS_5]
-                    Group By Operator [GBY_3] (rows=1/1 width=880)
-                      Output:["_col0","_col1"],aggregations:["compute_stats(VALUE._col0, 'hll')","compute_stats(VALUE._col2, 'hll')"]
-                    <-Reducer 4 [CUSTOM_SIMPLE_EDGE]
-                      File Output Operator [FS_19]
-                        table:{"name:":"default.src_multi2"}
-                        Select Operator [SEL_18] (rows=849/508 width=178)
-                          Output:["_col0","_col1"]
-                          Merge Join Operator [MERGEJOIN_26] (rows=849/508 width=178)
-                            Conds:RS_15._col0=RS_16._col0(Inner),Output:["_col0","_col3"]
-                          <-Map 7 [SIMPLE_EDGE]
-                            SHUFFLE [RS_16]
-                              PartitionCols:_col0
-                              Select Operator [SEL_14] (rows=500/500 width=178)
-                                Output:["_col0","_col1"]
-                                Filter Operator [FIL_25] (rows=500/500 width=178)
-                                  predicate:key is not null
-                                  TableScan [TS_12] (rows=500/500 width=178)
-                                    default@src,src,Tbl:COMPLETE,Col:COMPLETE,Output:["key","value"]
-                          <-Reducer 3 [SIMPLE_EDGE]
-                            SHUFFLE [RS_15]
-                              PartitionCols:_col0
-                              Select Operator [SEL_11] (rows=525/319 width=178)
-                                Output:["_col0"]
-                                Group By Operator [GBY_10] (rows=525/319 width=178)
-                                  Output:["_col0","_col1"],keys:KEY._col0, KEY._col1
-                                <-Union 2 [SIMPLE_EDGE]
-                                  <-Map 1 [CONTAINS]
-                                    Reduce Output Operator [RS_9]
-                                      PartitionCols:_col0, _col1
-                                      Select Operator [SEL_2] (rows=500/500 width=178)
-                                        Output:["_col0","_col1"]
-                                        Filter Operator [FIL_23] (rows=500/500 width=178)
-                                          predicate:key is not null
-                                          TableScan [TS_0] (rows=500/500 width=178)
-                                            Output:["key","value"]
-                                  <-Map 6 [CONTAINS]
-                                    Reduce Output Operator [RS_9]
-                                      PartitionCols:_col0, _col1
-                                      Select Operator [SEL_5] (rows=25/25 width=175)
-                                        Output:["_col0","_col1"]
-                                        Filter Operator [FIL_24] (rows=25/25 width=175)
-                                          predicate:key is not null
-                                          TableScan [TS_3] (rows=25/25 width=175)
-                                            Output:["key","value"]
-                      PARTITION_ONLY_SHUFFLE [RS_2]
-                        Select Operator [SEL_1] (rows=849/508 width=178)
-                          Output:["key","value"]
-                           Please refer to the previous Select Operator [SEL_18]
+Stage-3
+  Stats Work{}
+    Stage-0
+      Move Operator
+        table:{"name:":"default.src_multi2"}
+        Stage-2
+          Dependency Collection{}
+            Stage-1
+              Reducer 5
+              File Output Operator [FS_5]
+                Group By Operator [GBY_3] (rows=1/1 width=880)
+                  Output:["_col0","_col1"],aggregations:["compute_stats(VALUE._col0, 'hll')","compute_stats(VALUE._col2, 'hll')"]
+                <-Reducer 4 [CUSTOM_SIMPLE_EDGE]
+                  File Output Operator [FS_19]
+                    table:{"name:":"default.src_multi2"}
+                    Select Operator [SEL_18] (rows=849/508 width=178)
+                      Output:["_col0","_col1"]
+                      Merge Join Operator [MERGEJOIN_26] (rows=849/508 width=178)
+                        Conds:RS_15._col0=RS_16._col0(Inner),Output:["_col0","_col3"]
+                      <-Map 7 [SIMPLE_EDGE]
+                        SHUFFLE [RS_16]
+                          PartitionCols:_col0
+                          Select Operator [SEL_14] (rows=500/500 width=178)
+                            Output:["_col0","_col1"]
+                            Filter Operator [FIL_25] (rows=500/500 width=178)
+                              predicate:key is not null
+                              TableScan [TS_12] (rows=500/500 width=178)
+                                default@src,src,Tbl:COMPLETE,Col:COMPLETE,Output:["key","value"]
+                      <-Reducer 3 [SIMPLE_EDGE]
+                        SHUFFLE [RS_15]
+                          PartitionCols:_col0
+                          Select Operator [SEL_11] (rows=525/319 width=178)
+                            Output:["_col0"]
+                            Group By Operator [GBY_10] (rows=525/319 width=178)
+                              Output:["_col0","_col1"],keys:KEY._col0, KEY._col1
+                            <-Union 2 [SIMPLE_EDGE]
+                              <-Map 1 [CONTAINS]
+                                Reduce Output Operator [RS_9]
+                                  PartitionCols:_col0, _col1
+                                  Select Operator [SEL_2] (rows=500/500 width=178)
+                                    Output:["_col0","_col1"]
+                                    Filter Operator [FIL_23] (rows=500/500 width=178)
+                                      predicate:key is not null
+                                      TableScan [TS_0] (rows=500/500 width=178)
+                                        Output:["key","value"]
+                              <-Map 6 [CONTAINS]
+                                Reduce Output Operator [RS_9]
+                                  PartitionCols:_col0, _col1
+                                  Select Operator [SEL_5] (rows=25/25 width=175)
+                                    Output:["_col0","_col1"]
+                                    Filter Operator [FIL_24] (rows=25/25 width=175)
+                                      predicate:key is not null
+                                      TableScan [TS_3] (rows=25/25 width=175)
+                                        Output:["key","value"]
+                  PARTITION_ONLY_SHUFFLE [RS_2]
+                    Select Operator [SEL_1] (rows=849/508 width=178)
+                      Output:["key","value"]
+                       Please refer to the previous Select Operator [SEL_18]
 
 PREHOOK: query: select count(*) from (select * from src union select * from src1)subq
 PREHOOK: type: QUERY
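
Note on the src_multi2 plan above: the same two-stage collapse applies to
INSERT plans with column-stats autogathering; the Move Operator for
default.src_multi2 now hangs directly under the single Stats Work{} stage,
and the operator tree below it is unchanged apart from re-indentation and
stage renumbering. A minimal sketch of a statement producing a plan of this
shape, simplified from the union/join query in the test and assuming the
hive.stats.column.autogather setting is enabled:

  SET hive.stats.column.autogather=true;
  EXPLAIN ANALYZE
  INSERT OVERWRITE TABLE src_multi2
  SELECT key, value FROM src;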
@@ -285,7 +281,7 @@ Vertex dependency in root stage
 Reducer 2 <- Map 1 (SIMPLE_EDGE)
 
 Stage-3
-  Stats-Aggr Operator
+  Stats Work{}
     Stage-0
       Move Operator
         table:{"name:":"default.acid_uami"}
@@ -295,17 +291,17 @@ Stage-3
               Reducer 2
               File Output Operator [FS_8]
                 table:{"name:":"default.acid_uami"}
-                Select Operator [SEL_4] (rows=1/2 width=302)
+                Select Operator [SEL_4] (rows=1/2 width=328)
                   Output:["_col0","_col1","_col2","_col3"]
                 <-Map 1 [SIMPLE_EDGE]
                   SHUFFLE [RS_3]
                     PartitionCols:UDFToInteger(_col0)
-                    Select Operator [SEL_2] (rows=1/2 width=302)
+                    Select Operator [SEL_2] (rows=1/2 width=328)
                       Output:["_col0","_col1","_col3"]
-                      Filter Operator [FIL_9] (rows=1/2 width=226)
+                      Filter Operator [FIL_9] (rows=1/2 width=328)
                         predicate:((de = 109.23) or (de = 119.23))
-                        TableScan [TS_0] (rows=1/4 width=226)
-                          default@acid_uami,acid_uami, ACID table,Tbl:COMPLETE,Col:COMPLETE,Output:["i","de","vc"]
+                        TableScan [TS_0] (rows=1/4 width=328)
+                          default@acid_uami,acid_uami, ACID table,Tbl:COMPLETE,Col:NONE,Output:["i","de","vc"]
 
 PREHOOK: query: select * from acid_uami order by de
 PREHOOK: type: QUERY
@@ -398,7 +394,7 @@ Vertex dependency in root stage
 Reducer 2 <- Map 1 (SIMPLE_EDGE)
 
 Stage-3
-  Stats-Aggr Operator
+  Stats Work{}
     Stage-0
       Move Operator
         table:{"name:":"default.acid_dot"}

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/tez/explainuser_3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/explainuser_3.q.out b/ql/src/test/results/clientpositive/tez/explainuser_3.q.out
index c9f5899..fa73dc6 100644
--- a/ql/src/test/results/clientpositive/tez/explainuser_3.q.out
+++ b/ql/src/test/results/clientpositive/tez/explainuser_3.q.out
@@ -189,7 +189,7 @@ PREHOOK: type: QUERY
 POSTHOOK: query: explain analyze table src compute statistics
 POSTHOOK: type: QUERY
 Stage-2
-  Stats-Aggr Operator
+  Stats Work{}
     Stage-0
       Map 1 vectorized
       TableScan [TS_0] (rows=500 width=10)
@@ -202,23 +202,21 @@ POSTHOOK: type: QUERY
 Vertex dependency in root stage
 Reducer 2 <- Map 1 (CUSTOM_SIMPLE_EDGE)
 
-Stage-3
-  Column Stats Work{}
-    Stage-2
-      Stats-Aggr Operator
-        Stage-0
-          Reducer 2
-          File Output Operator [FS_6]
-            Group By Operator [GBY_4] (rows=1 width=880)
-              Output:["_col0","_col1"],aggregations:["compute_stats(VALUE._col0)","compute_stats(VALUE._col1)"]
-            <-Map 1 [CUSTOM_SIMPLE_EDGE]
-              PARTITION_ONLY_SHUFFLE [RS_3]
-                Group By Operator [GBY_2] (rows=1 width=880)
-                  Output:["_col0","_col1"],aggregations:["compute_stats(key, 'hll')","compute_stats(value, 'hll')"]
-                  Select Operator [SEL_1] (rows=500 width=178)
-                    Output:["key","value"]
-                    TableScan [TS_0] (rows=500 width=178)
-                      default@src,src,Tbl:COMPLETE,Col:COMPLETE,Output:["key","value"]
+Stage-2
+  Stats Work{}
+    Stage-0
+      Reducer 2
+      File Output Operator [FS_6]
+        Group By Operator [GBY_4] (rows=1 width=880)
+          Output:["_col0","_col1"],aggregations:["compute_stats(VALUE._col0)","compute_stats(VALUE._col1)"]
+        <-Map 1 [CUSTOM_SIMPLE_EDGE]
+          PARTITION_ONLY_SHUFFLE [RS_3]
+            Group By Operator [GBY_2] (rows=1 width=880)
+              Output:["_col0","_col1"],aggregations:["compute_stats(key, 'hll')","compute_stats(value, 'hll')"]
+              Select Operator [SEL_1] (rows=500 width=178)
+                Output:["key","value"]
+                TableScan [TS_0] (rows=500 width=178)
+                  default@src,src,Tbl:COMPLETE,Col:COMPLETE,Output:["key","value"]
 
 PREHOOK: query: explain
 CREATE TEMPORARY MACRO SIGMOID (x DOUBLE) 1.0 / (1.0 + EXP(-x))
@@ -268,7 +266,7 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 Plan optimized by CBO.
 
 Stage-3
-  Stats-Aggr Operator
+  Stats Work{}
     Stage-4
       Create Table Operator:
         name:default.src_autho_test
@@ -497,7 +495,7 @@ POSTHOOK: type: QUERY
 Plan optimized by CBO.
 
 Stage-3
-  Stats-Aggr Operator
+  Stats Work{}
     Stage-0
       Move Operator
         table:{"name:":"default.orc_merge5"}

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/tunable_ndv.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tunable_ndv.q.out b/ql/src/test/results/clientpositive/tunable_ndv.q.out
index ba2a0ea..a232eaa 100644
--- a/ql/src/test/results/clientpositive/tunable_ndv.q.out
+++ b/ql/src/test/results/clientpositive/tunable_ndv.q.out
@@ -60,12 +60,18 @@ PREHOOK: type: QUERY
 PREHOOK: Input: default@loc_orc_1d
 PREHOOK: Input: default@loc_orc_1d@year=2000
 PREHOOK: Input: default@loc_orc_1d@year=2001
+PREHOOK: Output: default@loc_orc_1d
+PREHOOK: Output: default@loc_orc_1d@year=2000
+PREHOOK: Output: default@loc_orc_1d@year=2001
 #### A masked pattern was here ####
 POSTHOOK: query: analyze table loc_orc_1d compute statistics for columns state,locid
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@loc_orc_1d
 POSTHOOK: Input: default@loc_orc_1d@year=2000
 POSTHOOK: Input: default@loc_orc_1d@year=2001
+POSTHOOK: Output: default@loc_orc_1d
+POSTHOOK: Output: default@loc_orc_1d@year=2000
+POSTHOOK: Output: default@loc_orc_1d@year=2001
 #### A masked pattern was here ####
 PREHOOK: query: describe formatted loc_orc_1d partition(year=2000) locid
 PREHOOK: type: DESCTABLE
@@ -197,41 +203,57 @@ PREHOOK: query: analyze table loc_orc_2d partition(zip=94086, year='2000') compu
 PREHOOK: type: QUERY
 PREHOOK: Input: default@loc_orc_2d
 PREHOOK: Input: default@loc_orc_2d@zip=94086/year=2000
+PREHOOK: Output: default@loc_orc_2d
+PREHOOK: Output: default@loc_orc_2d@zip=94086/year=2000
 #### A masked pattern was here ####
 POSTHOOK: query: analyze table loc_orc_2d partition(zip=94086, year='2000') compute statistics for columns state,locid
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@loc_orc_2d
 POSTHOOK: Input: default@loc_orc_2d@zip=94086/year=2000
+POSTHOOK: Output: default@loc_orc_2d
+POSTHOOK: Output: default@loc_orc_2d@zip=94086/year=2000
 #### A masked pattern was here ####
 PREHOOK: query: analyze table loc_orc_2d partition(zip=94087, year='2000') compute statistics for columns state,locid
 PREHOOK: type: QUERY
 PREHOOK: Input: default@loc_orc_2d
 PREHOOK: Input: default@loc_orc_2d@zip=94087/year=2000
+PREHOOK: Output: default@loc_orc_2d
+PREHOOK: Output: default@loc_orc_2d@zip=94087/year=2000
 #### A masked pattern was here ####
 POSTHOOK: query: analyze table loc_orc_2d partition(zip=94087, year='2000') compute statistics for columns state,locid
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@loc_orc_2d
 POSTHOOK: Input: default@loc_orc_2d@zip=94087/year=2000
+POSTHOOK: Output: default@loc_orc_2d
+POSTHOOK: Output: default@loc_orc_2d@zip=94087/year=2000
 #### A masked pattern was here ####
 PREHOOK: query: analyze table loc_orc_2d partition(zip=94086, year='2001') compute statistics for columns state,locid
 PREHOOK: type: QUERY
 PREHOOK: Input: default@loc_orc_2d
 PREHOOK: Input: default@loc_orc_2d@zip=94086/year=2001
+PREHOOK: Output: default@loc_orc_2d
+PREHOOK: Output: default@loc_orc_2d@zip=94086/year=2001
 #### A masked pattern was here ####
 POSTHOOK: query: analyze table loc_orc_2d partition(zip=94086, year='2001') compute statistics for columns state,locid
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@loc_orc_2d
 POSTHOOK: Input: default@loc_orc_2d@zip=94086/year=2001
+POSTHOOK: Output: default@loc_orc_2d
+POSTHOOK: Output: default@loc_orc_2d@zip=94086/year=2001
 #### A masked pattern was here ####
 PREHOOK: query: analyze table loc_orc_2d partition(zip=94087, year='2001') compute statistics for columns state,locid
 PREHOOK: type: QUERY
 PREHOOK: Input: default@loc_orc_2d
 PREHOOK: Input: default@loc_orc_2d@zip=94087/year=2001
+PREHOOK: Output: default@loc_orc_2d
+PREHOOK: Output: default@loc_orc_2d@zip=94087/year=2001
 #### A masked pattern was here ####
 POSTHOOK: query: analyze table loc_orc_2d partition(zip=94087, year='2001') compute statistics for columns state,locid
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@loc_orc_2d
 POSTHOOK: Input: default@loc_orc_2d@zip=94087/year=2001
+POSTHOOK: Output: default@loc_orc_2d
+POSTHOOK: Output: default@loc_orc_2d@zip=94087/year=2001
 #### A masked pattern was here ####
 PREHOOK: query: describe formatted loc_orc_2d locid
 PREHOOK: type: DESCTABLE
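
Note on the tunable_ndv hunks above: for partitioned tables the column
ANALYZE now registers the table and every touched partition as write
entities, mirroring the read entities already listed. For example, the
table-level statement from this test:

  ANALYZE TABLE loc_orc_1d COMPUTE STATISTICS FOR COLUMNS state, locid;

now reports default@loc_orc_1d plus each year partition under both
PREHOOK: Output and POSTHOOK: Output.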

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/udf1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/udf1.q.out b/ql/src/test/results/clientpositive/udf1.q.out
index eebd90f..b59649b 100644
--- a/ql/src/test/results/clientpositive/udf1.q.out
+++ b/ql/src/test/results/clientpositive/udf1.q.out
@@ -85,7 +85,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-3
     Map Reduce

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/udf3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/udf3.q.out b/ql/src/test/results/clientpositive/udf3.q.out
index 96038f1..e52a827 100644
--- a/ql/src/test/results/clientpositive/udf3.q.out
+++ b/ql/src/test/results/clientpositive/udf3.q.out
@@ -67,7 +67,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM src INSERT OVERWRITE TABLE dest1 SELECT count(CAST('' AS INT)), sum(CAST('' AS INT)), avg(CAST('' AS INT)), 
 min(CAST('' AS INT)), max(CAST('' AS INT))

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/udf_10_trims.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/udf_10_trims.q.out b/ql/src/test/results/clientpositive/udf_10_trims.q.out
index 3a5303a..d15dbc2 100644
--- a/ql/src/test/results/clientpositive/udf_10_trims.q.out
+++ b/ql/src/test/results/clientpositive/udf_10_trims.q.out
@@ -71,7 +71,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-3
     Map Reduce

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/udf_character_length.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/udf_character_length.q.out b/ql/src/test/results/clientpositive/udf_character_length.q.out
index 332ec95..47ed678 100644
--- a/ql/src/test/results/clientpositive/udf_character_length.q.out
+++ b/ql/src/test/results/clientpositive/udf_character_length.q.out
@@ -92,7 +92,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-3
     Map Reduce

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/udf_length.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/udf_length.q.out b/ql/src/test/results/clientpositive/udf_length.q.out
index fc795bb..e21df14 100644
--- a/ql/src/test/results/clientpositive/udf_length.q.out
+++ b/ql/src/test/results/clientpositive/udf_length.q.out
@@ -75,7 +75,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-3
     Map Reduce

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/udf_octet_length.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/udf_octet_length.q.out b/ql/src/test/results/clientpositive/udf_octet_length.q.out
index f8738f8..4d742b1 100644
--- a/ql/src/test/results/clientpositive/udf_octet_length.q.out
+++ b/ql/src/test/results/clientpositive/udf_octet_length.q.out
@@ -75,7 +75,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-3
     Map Reduce

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/udf_reverse.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/udf_reverse.q.out b/ql/src/test/results/clientpositive/udf_reverse.q.out
index 28b0c9f..316780e 100644
--- a/ql/src/test/results/clientpositive/udf_reverse.q.out
+++ b/ql/src/test/results/clientpositive/udf_reverse.q.out
@@ -75,7 +75,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-3
     Map Reduce

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/union10.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/union10.q.out b/ql/src/test/results/clientpositive/union10.q.out
index e14e5e0..f5e2b25 100644
--- a/ql/src/test/results/clientpositive/union10.q.out
+++ b/ql/src/test/results/clientpositive/union10.q.out
@@ -139,7 +139,8 @@ STAGE PLANS:
               name: default.tmptable
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-4
     Map Reduce

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/union12.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/union12.q.out b/ql/src/test/results/clientpositive/union12.q.out
index 10540f9..a6cd633 100644
--- a/ql/src/test/results/clientpositive/union12.q.out
+++ b/ql/src/test/results/clientpositive/union12.q.out
@@ -139,7 +139,8 @@ STAGE PLANS:
               name: default.tmptable
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-4
     Map Reduce

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/union17.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/union17.q.out b/ql/src/test/results/clientpositive/union17.q.out
index bff29f6..d997c29 100644
--- a/ql/src/test/results/clientpositive/union17.q.out
+++ b/ql/src/test/results/clientpositive/union17.q.out
@@ -164,7 +164,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-4
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-5
     Map Reduce
@@ -206,7 +207,8 @@ STAGE PLANS:
               name: default.dest2
 
   Stage: Stage-6
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM (select 'tst1' as key, cast(count(1) as string) as value from src s1
                          UNION  ALL  
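
Note on the union17 hunks above: a multi-table INSERT gets one Stats Work
stage per destination (here Stage-4 for default.dest1 and Stage-6 for
default.dest2), exactly as it previously got one Stats-Aggr Operator per
destination. A minimal multi-insert sketch showing that shape, with
hypothetical dest1/dest2 tables of matching schema:

  FROM src
  INSERT OVERWRITE TABLE dest1 SELECT key, value
  INSERT OVERWRITE TABLE dest2 SELECT value, key;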

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/union18.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/union18.q.out b/ql/src/test/results/clientpositive/union18.q.out
index 702ff10..5f737f6 100644
--- a/ql/src/test/results/clientpositive/union18.q.out
+++ b/ql/src/test/results/clientpositive/union18.q.out
@@ -157,7 +157,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-4
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-5
     Map Reduce
@@ -209,7 +210,8 @@ STAGE PLANS:
               name: default.dest2
 
   Stage: Stage-10
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-11
     Map Reduce

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/union19.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/union19.q.out b/ql/src/test/results/clientpositive/union19.q.out
index 3553091..e4ff0d5 100644
--- a/ql/src/test/results/clientpositive/union19.q.out
+++ b/ql/src/test/results/clientpositive/union19.q.out
@@ -165,7 +165,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-4
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-1
     Move Operator
@@ -178,7 +179,8 @@ STAGE PLANS:
               name: default.dest2
 
   Stage: Stage-5
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM (select 'tst1' as key, cast(count(1) as string) as value from src s1
                          UNION  ALL  

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/union22.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/union22.q.out b/ql/src/test/results/clientpositive/union22.q.out
index 9134bdf..e4b4a09 100644
--- a/ql/src/test/results/clientpositive/union22.q.out
+++ b/ql/src/test/results/clientpositive/union22.q.out
@@ -499,7 +499,8 @@ STAGE PLANS:
               name: default.dst_union22
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
   Stage: Stage-4

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/union25.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/union25.q.out b/ql/src/test/results/clientpositive/union25.q.out
index a287a97..9eac674 100644
--- a/ql/src/test/results/clientpositive/union25.q.out
+++ b/ql/src/test/results/clientpositive/union25.q.out
@@ -197,5 +197,6 @@ STAGE PLANS:
           name: default.tmp_unionall
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/union28.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/union28.q.out b/ql/src/test/results/clientpositive/union28.q.out
index c3789d0..9b203f5 100644
--- a/ql/src/test/results/clientpositive/union28.q.out
+++ b/ql/src/test/results/clientpositive/union28.q.out
@@ -153,7 +153,8 @@ STAGE PLANS:
               name: default.union_subq_union
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-4
     Map Reduce

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/union29.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/union29.q.out b/ql/src/test/results/clientpositive/union29.q.out
index 87ba275..b933426 100644
--- a/ql/src/test/results/clientpositive/union29.q.out
+++ b/ql/src/test/results/clientpositive/union29.q.out
@@ -130,7 +130,8 @@ STAGE PLANS:
               name: default.union_subq_union
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-3
     Map Reduce

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/union30.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/union30.q.out b/ql/src/test/results/clientpositive/union30.q.out
index 862ebc1..3ee716b 100644
--- a/ql/src/test/results/clientpositive/union30.q.out
+++ b/ql/src/test/results/clientpositive/union30.q.out
@@ -188,7 +188,8 @@ STAGE PLANS:
               name: default.union_subq_union
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-4
     Map Reduce

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/union31.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/union31.q.out b/ql/src/test/results/clientpositive/union31.q.out
index b7a63fc..4edaed6 100644
--- a/ql/src/test/results/clientpositive/union31.q.out
+++ b/ql/src/test/results/clientpositive/union31.q.out
@@ -193,7 +193,8 @@ STAGE PLANS:
               name: default.t3
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-4
     Map Reduce
@@ -236,7 +237,8 @@ STAGE PLANS:
               name: default.t4
 
   Stage: Stage-5
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: from
 (select * from t1
@@ -456,7 +458,8 @@ STAGE PLANS:
               name: default.t5
 
   Stage: Stage-4
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-1
     Move Operator
@@ -469,7 +472,8 @@ STAGE PLANS:
               name: default.t6
 
   Stage: Stage-5
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-6
     Map Reduce
@@ -782,7 +786,8 @@ STAGE PLANS:
               name: default.t7
 
   Stage: Stage-4
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-1
     Move Operator
@@ -795,7 +800,8 @@ STAGE PLANS:
               name: default.t8
 
   Stage: Stage-5
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: from
 (

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/union33.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/union33.q.out b/ql/src/test/results/clientpositive/union33.q.out
index 17aeecd..2709d99 100644
--- a/ql/src/test/results/clientpositive/union33.q.out
+++ b/ql/src/test/results/clientpositive/union33.q.out
@@ -156,7 +156,8 @@ STAGE PLANS:
               name: default.test_src
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-4
     Map Reduce
@@ -371,7 +372,8 @@ STAGE PLANS:
               name: default.test_src
 
   Stage: Stage-4
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-5
     Map Reduce

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/union4.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/union4.q.out b/ql/src/test/results/clientpositive/union4.q.out
index f3cd59c..cdaa78f 100644
--- a/ql/src/test/results/clientpositive/union4.q.out
+++ b/ql/src/test/results/clientpositive/union4.q.out
@@ -119,7 +119,8 @@ STAGE PLANS:
               name: default.tmptable
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-4
     Map Reduce

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/union6.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/union6.q.out b/ql/src/test/results/clientpositive/union6.q.out
index fc66cf1..f7a578c 100644
--- a/ql/src/test/results/clientpositive/union6.q.out
+++ b/ql/src/test/results/clientpositive/union6.q.out
@@ -116,7 +116,8 @@ STAGE PLANS:
               name: default.tmptable
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-4
     Map Reduce

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/union_lateralview.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/union_lateralview.q.out b/ql/src/test/results/clientpositive/union_lateralview.q.out
index f563476..c45bf75 100644
--- a/ql/src/test/results/clientpositive/union_lateralview.q.out
+++ b/ql/src/test/results/clientpositive/union_lateralview.q.out
@@ -190,7 +190,8 @@ STAGE PLANS:
               name: default.test_union_lateral_view
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: INSERT OVERWRITE TABLE test_union_lateral_view
 SELECT b.key, d.arr_ele, d.value

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/union_stats.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/union_stats.q.out b/ql/src/test/results/clientpositive/union_stats.q.out
index ae0ef9d..6585339 100644
--- a/ql/src/test/results/clientpositive/union_stats.q.out
+++ b/ql/src/test/results/clientpositive/union_stats.q.out
@@ -156,7 +156,8 @@ STAGE PLANS:
           name: default.t
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
   Stage: Stage-3

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/updateAccessTime.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/updateAccessTime.q.out b/ql/src/test/results/clientpositive/updateAccessTime.q.out
index 777e84a..62e9f74 100644
--- a/ql/src/test/results/clientpositive/updateAccessTime.q.out
+++ b/ql/src/test/results/clientpositive/updateAccessTime.q.out
@@ -220,8 +220,10 @@ POSTHOOK: Output: default@src
 PREHOOK: query: ANALYZE TABLE src COMPUTE STATISTICS FOR COLUMNS key,value
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src
+PREHOOK: Output: default@src
 #### A masked pattern was here ####
 POSTHOOK: query: ANALYZE TABLE src COMPUTE STATISTICS FOR COLUMNS key,value
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
+POSTHOOK: Output: default@src
 #### A masked pattern was here ####

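[Editor's note, hedged: a side effect visible throughout these hunks is that ANALYZE ... COMPUTE STATISTICS FOR COLUMNS now registers the analyzed table as a write entity, so the hook logs gain an Output line alongside the existing Input line. Sketch using the src table from updateAccessTime.q above; the hook lines are copied from the new golden output:

    ANALYZE TABLE src COMPUTE STATISTICS FOR COLUMNS key,value;
    -- hook log now records the table on both sides:
    -- PREHOOK: Input: default@src
    -- PREHOOK: Output: default@src
    -- POSTHOOK: Input: default@src
    -- POSTHOOK: Output: default@src
]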
http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/vector_bucket.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_bucket.q.out b/ql/src/test/results/clientpositive/vector_bucket.q.out
index 44e44a6..137a7ad 100644
--- a/ql/src/test/results/clientpositive/vector_bucket.q.out
+++ b/ql/src/test/results/clientpositive/vector_bucket.q.out
@@ -87,7 +87,8 @@ STAGE PLANS:
               name: default.non_orc_table
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: select a, b from non_orc_table order by a
 PREHOOK: type: QUERY

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/vector_char_4.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_char_4.q.out b/ql/src/test/results/clientpositive/vector_char_4.q.out
index d4eb4d7..e9b7668 100644
--- a/ql/src/test/results/clientpositive/vector_char_4.q.out
+++ b/ql/src/test/results/clientpositive/vector_char_4.q.out
@@ -201,7 +201,8 @@ STAGE PLANS:
               name: default.char_lazy_binary_columnar
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-3
     Merge File Operator

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/vector_const.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_const.q.out b/ql/src/test/results/clientpositive/vector_const.q.out
index 16e4089..d25673d 100644
--- a/ql/src/test/results/clientpositive/vector_const.q.out
+++ b/ql/src/test/results/clientpositive/vector_const.q.out
@@ -29,7 +29,7 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: varchar_const_1
-            Statistics: Num rows: 1 Data size: 182 Basic stats: COMPLETE Column stats: COMPLETE
+            Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
             Select Operator
               expressions: 'FF' (type: varchar(4))
               outputColumnNames: _col0

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/vector_decimal_6.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_decimal_6.q.out b/ql/src/test/results/clientpositive/vector_decimal_6.q.out
index 7ef97b2..8896459 100644
--- a/ql/src/test/results/clientpositive/vector_decimal_6.q.out
+++ b/ql/src/test/results/clientpositive/vector_decimal_6.q.out
@@ -581,7 +581,8 @@ STAGE PLANS:
           name: default.DECIMAL_6_3
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: CREATE TABLE DECIMAL_6_3 STORED AS ORC AS SELECT key + 5.5 AS k, value * 11 AS v from DECIMAL_6_1 ORDER BY v
 PREHOOK: type: CREATETABLE_AS_SELECT

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/vector_groupby4.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_groupby4.q.out b/ql/src/test/results/clientpositive/vector_groupby4.q.out
index 460c735..d870e84 100644
--- a/ql/src/test/results/clientpositive/vector_groupby4.q.out
+++ b/ql/src/test/results/clientpositive/vector_groupby4.q.out
@@ -147,7 +147,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM srcorc
 INSERT OVERWRITE TABLE dest1 SELECT substr(srcorc.key,1,1) GROUP BY substr(srcorc.key,1,1)

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/vector_groupby6.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_groupby6.q.out b/ql/src/test/results/clientpositive/vector_groupby6.q.out
index 6a64ff5..43f98d2 100644
--- a/ql/src/test/results/clientpositive/vector_groupby6.q.out
+++ b/ql/src/test/results/clientpositive/vector_groupby6.q.out
@@ -147,7 +147,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM srcorc
 INSERT OVERWRITE TABLE dest1 SELECT DISTINCT substr(srcorc.value,5,1)

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/vector_if_expr_2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_if_expr_2.q.out b/ql/src/test/results/clientpositive/vector_if_expr_2.q.out
index 8c901f8..40dc259 100644
--- a/ql/src/test/results/clientpositive/vector_if_expr_2.q.out
+++ b/ql/src/test/results/clientpositive/vector_if_expr_2.q.out
@@ -38,7 +38,7 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: foo
-            Statistics: Num rows: 1 Data size: 258 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 3 Data size: 20 Basic stats: COMPLETE Column stats: NONE
             TableScan Vectorization:
                 native: true
             Select Operator
@@ -49,7 +49,7 @@ STAGE PLANS:
                   native: true
                   projectedOutputColumnNums: [0, 4]
                   selectExpressions: IfExprLongColumnLongScalar(col 3:boolean, col 1:int, val 0)(children: LongColGreaterLongScalar(col 0:int, val 0) -> 3:boolean) -> 4:int
-              Statistics: Num rows: 1 Data size: 258 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 3 Data size: 20 Basic stats: COMPLETE Column stats: NONE
               Reduce Output Operator
                 key expressions: _col0 (type: int)
                 sort order: +
@@ -58,7 +58,7 @@ STAGE PLANS:
                     native: false
                     nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                     nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false
-                Statistics: Num rows: 1 Data size: 258 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 3 Data size: 20 Basic stats: COMPLETE Column stats: NONE
                 value expressions: _col1 (type: int)
       Execution mode: vectorized
       Map Vectorization:
@@ -78,10 +78,10 @@ STAGE PLANS:
         Select Operator
           expressions: KEY.reducesinkkey0 (type: int), VALUE._col0 (type: int)
           outputColumnNames: _col0, _col1
-          Statistics: Num rows: 1 Data size: 258 Basic stats: COMPLETE Column stats: NONE
+          Statistics: Num rows: 3 Data size: 20 Basic stats: COMPLETE Column stats: NONE
           File Output Operator
             compressed: false
-            Statistics: Num rows: 1 Data size: 258 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 3 Data size: 20 Basic stats: COMPLETE Column stats: NONE
             table:
                 input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                 output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/vector_like_2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_like_2.q.out b/ql/src/test/results/clientpositive/vector_like_2.q.out
index 30ee8f0..eef9094 100644
--- a/ql/src/test/results/clientpositive/vector_like_2.q.out
+++ b/ql/src/test/results/clientpositive/vector_like_2.q.out
@@ -37,7 +37,7 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: foo
-            Statistics: Num rows: 1 Data size: 255 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 3 Data size: 184 Basic stats: COMPLETE Column stats: NONE
             TableScan Vectorization:
                 native: true
                 vectorizationSchemaColumns: [0:a:string, 1:ROW__ID:struct<transactionid:bigint,bucketid:int,rowid:bigint>]
@@ -49,7 +49,7 @@ STAGE PLANS:
                   native: true
                   projectedOutputColumnNums: [0, 2]
                   selectExpressions: SelectStringColLikeStringScalar(col 0:string) -> 2:boolean
-              Statistics: Num rows: 1 Data size: 255 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 3 Data size: 184 Basic stats: COMPLETE Column stats: NONE
               Reduce Output Operator
                 key expressions: _col0 (type: string)
                 sort order: +
@@ -58,7 +58,7 @@ STAGE PLANS:
                     native: false
                     nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                     nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false
-                Statistics: Num rows: 1 Data size: 255 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 3 Data size: 184 Basic stats: COMPLETE Column stats: NONE
                 value expressions: _col1 (type: boolean)
       Execution mode: vectorized
       Map Vectorization:
@@ -84,10 +84,10 @@ STAGE PLANS:
         Select Operator
           expressions: KEY.reducesinkkey0 (type: string), VALUE._col0 (type: boolean)
           outputColumnNames: _col0, _col1
-          Statistics: Num rows: 1 Data size: 255 Basic stats: COMPLETE Column stats: NONE
+          Statistics: Num rows: 3 Data size: 184 Basic stats: COMPLETE Column stats: NONE
           File Output Operator
             compressed: false
-            Statistics: Num rows: 1 Data size: 255 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 3 Data size: 184 Basic stats: COMPLETE Column stats: NONE
             table:
                 input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                 output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/vector_multi_insert.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_multi_insert.q.out b/ql/src/test/results/clientpositive/vector_multi_insert.q.out
index de49fad..9fc92dd 100644
--- a/ql/src/test/results/clientpositive/vector_multi_insert.q.out
+++ b/ql/src/test/results/clientpositive/vector_multi_insert.q.out
@@ -186,7 +186,8 @@ STAGE PLANS:
               name: default.orc_rn1
 
   Stage: Stage-4
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-5
     Map Reduce
@@ -238,7 +239,8 @@ STAGE PLANS:
               name: default.orc_rn2
 
   Stage: Stage-10
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-11
     Map Reduce
@@ -290,7 +292,8 @@ STAGE PLANS:
               name: default.orc_rn3
 
   Stage: Stage-16
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-17
     Map Reduce

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/vector_outer_join1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_outer_join1.q.out b/ql/src/test/results/clientpositive/vector_outer_join1.q.out
index b24e3b2..6dadcc6 100644
--- a/ql/src/test/results/clientpositive/vector_outer_join1.q.out
+++ b/ql/src/test/results/clientpositive/vector_outer_join1.q.out
@@ -186,10 +186,12 @@ POSTHOOK: Output: default@small_alltypesorc_a
 PREHOOK: query: ANALYZE TABLE small_alltypesorc_a COMPUTE STATISTICS FOR COLUMNS
 PREHOOK: type: QUERY
 PREHOOK: Input: default@small_alltypesorc_a
+PREHOOK: Output: default@small_alltypesorc_a
 #### A masked pattern was here ####
 POSTHOOK: query: ANALYZE TABLE small_alltypesorc_a COMPUTE STATISTICS FOR COLUMNS
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@small_alltypesorc_a
+POSTHOOK: Output: default@small_alltypesorc_a
 #### A masked pattern was here ####
 PREHOOK: query: select * from small_alltypesorc_a
 PREHOOK: type: QUERY

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/vector_outer_join2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_outer_join2.q.out b/ql/src/test/results/clientpositive/vector_outer_join2.q.out
index beb8439..bffc709 100644
--- a/ql/src/test/results/clientpositive/vector_outer_join2.q.out
+++ b/ql/src/test/results/clientpositive/vector_outer_join2.q.out
@@ -191,10 +191,12 @@ POSTHOOK: Output: default@small_alltypesorc_a
 PREHOOK: query: ANALYZE TABLE small_alltypesorc_a COMPUTE STATISTICS FOR COLUMNS
 PREHOOK: type: QUERY
 PREHOOK: Input: default@small_alltypesorc_a
+PREHOOK: Output: default@small_alltypesorc_a
 #### A masked pattern was here ####
 POSTHOOK: query: ANALYZE TABLE small_alltypesorc_a COMPUTE STATISTICS FOR COLUMNS
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@small_alltypesorc_a
+POSTHOOK: Output: default@small_alltypesorc_a
 #### A masked pattern was here ####
 PREHOOK: query: select * from small_alltypesorc_a
 PREHOOK: type: QUERY

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/vector_outer_join3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_outer_join3.q.out b/ql/src/test/results/clientpositive/vector_outer_join3.q.out
index 2095aa4..9c01cbe 100644
--- a/ql/src/test/results/clientpositive/vector_outer_join3.q.out
+++ b/ql/src/test/results/clientpositive/vector_outer_join3.q.out
@@ -191,10 +191,12 @@ POSTHOOK: Output: default@small_alltypesorc_a
 PREHOOK: query: ANALYZE TABLE small_alltypesorc_a COMPUTE STATISTICS FOR COLUMNS
 PREHOOK: type: QUERY
 PREHOOK: Input: default@small_alltypesorc_a
+PREHOOK: Output: default@small_alltypesorc_a
 #### A masked pattern was here ####
 POSTHOOK: query: ANALYZE TABLE small_alltypesorc_a COMPUTE STATISTICS FOR COLUMNS
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@small_alltypesorc_a
+POSTHOOK: Output: default@small_alltypesorc_a
 #### A masked pattern was here ####
 PREHOOK: query: select * from small_alltypesorc_a
 PREHOOK: type: QUERY

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/vector_outer_join4.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_outer_join4.q.out b/ql/src/test/results/clientpositive/vector_outer_join4.q.out
index a8a2d4a..0a90230 100644
--- a/ql/src/test/results/clientpositive/vector_outer_join4.q.out
+++ b/ql/src/test/results/clientpositive/vector_outer_join4.q.out
@@ -201,10 +201,12 @@ POSTHOOK: Output: default@small_alltypesorc_b
 PREHOOK: query: ANALYZE TABLE small_alltypesorc_b COMPUTE STATISTICS FOR COLUMNS
 PREHOOK: type: QUERY
 PREHOOK: Input: default@small_alltypesorc_b
+PREHOOK: Output: default@small_alltypesorc_b
 #### A masked pattern was here ####
 POSTHOOK: query: ANALYZE TABLE small_alltypesorc_b COMPUTE STATISTICS FOR COLUMNS
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@small_alltypesorc_b
+POSTHOOK: Output: default@small_alltypesorc_b
 #### A masked pattern was here ####
 PREHOOK: query: select * from small_alltypesorc_b
 PREHOOK: type: QUERY


http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/groupby_cube_multi_gby.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/groupby_cube_multi_gby.q.out b/ql/src/test/results/clientpositive/groupby_cube_multi_gby.q.out
index 1e13288..0f77b3d 100644
--- a/ql/src/test/results/clientpositive/groupby_cube_multi_gby.q.out
+++ b/ql/src/test/results/clientpositive/groupby_cube_multi_gby.q.out
@@ -104,7 +104,8 @@ STAGE PLANS:
               name: default.t1
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-4
     Map Reduce
@@ -142,5 +143,6 @@ STAGE PLANS:
               name: default.t2
 
   Stage: Stage-5
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/groupby_duplicate_key.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/groupby_duplicate_key.q.out b/ql/src/test/results/clientpositive/groupby_duplicate_key.q.out
index 6e19930..d1d7f8d 100644
--- a/ql/src/test/results/clientpositive/groupby_duplicate_key.q.out
+++ b/ql/src/test/results/clientpositive/groupby_duplicate_key.q.out
@@ -143,7 +143,8 @@ STAGE PLANS:
           name: default.dummy
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: create table dummy as
 select distinct key, "X" as dummy1, "X" as dummy2 from src tablesample (10 rows)

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/groupby_map_ppr.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/groupby_map_ppr.q.out b/ql/src/test/results/clientpositive/groupby_map_ppr.q.out
index 0e5b394..e0952c8 100644
--- a/ql/src/test/results/clientpositive/groupby_map_ppr.q.out
+++ b/ql/src/test/results/clientpositive/groupby_map_ppr.q.out
@@ -228,7 +228,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
 PREHOOK: query: FROM srcpart src

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/groupby_map_ppr_multi_distinct.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/groupby_map_ppr_multi_distinct.q.out b/ql/src/test/results/clientpositive/groupby_map_ppr_multi_distinct.q.out
index dbcef22..3c53a58 100644
--- a/ql/src/test/results/clientpositive/groupby_map_ppr_multi_distinct.q.out
+++ b/ql/src/test/results/clientpositive/groupby_map_ppr_multi_distinct.q.out
@@ -228,7 +228,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
 PREHOOK: query: FROM srcpart src

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/groupby_multi_insert_common_distinct.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/groupby_multi_insert_common_distinct.q.out b/ql/src/test/results/clientpositive/groupby_multi_insert_common_distinct.q.out
index 5f02b04..6606edb 100644
--- a/ql/src/test/results/clientpositive/groupby_multi_insert_common_distinct.q.out
+++ b/ql/src/test/results/clientpositive/groupby_multi_insert_common_distinct.q.out
@@ -101,7 +101,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-4
     Map Reduce
@@ -143,7 +144,8 @@ STAGE PLANS:
               name: default.dest2
 
   Stage: Stage-5
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: from src
 insert overwrite table dest1 select key, count(distinct value) group by key

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/groupby_multi_single_reducer.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/groupby_multi_single_reducer.q.out b/ql/src/test/results/clientpositive/groupby_multi_single_reducer.q.out
index 256784d..ca1ebd7 100644
--- a/ql/src/test/results/clientpositive/groupby_multi_single_reducer.q.out
+++ b/ql/src/test/results/clientpositive/groupby_multi_single_reducer.q.out
@@ -151,7 +151,8 @@ STAGE PLANS:
               name: default.dest_g4
 
   Stage: Stage-4
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-0
     Move Operator
@@ -164,7 +165,8 @@ STAGE PLANS:
               name: default.dest_g2
 
   Stage: Stage-5
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-1
     Move Operator
@@ -177,7 +179,8 @@ STAGE PLANS:
               name: default.dest_g3
 
   Stage: Stage-6
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM src
 INSERT OVERWRITE TABLE dest_g2 SELECT substr(src.key,1,1), count(DISTINCT substr(src.value,5)), concat(substr(src.key,1,1),sum(substr(src.value,5))), sum(DISTINCT substr(src.value, 5)), count(src.value) WHERE substr(src.key,1,1) >= 5 GROUP BY substr(src.key,1,1)
@@ -389,7 +392,8 @@ STAGE PLANS:
               name: default.dest_g4
 
   Stage: Stage-6
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-0
     Move Operator
@@ -402,7 +406,8 @@ STAGE PLANS:
               name: default.dest_g2
 
   Stage: Stage-7
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-1
     Move Operator
@@ -415,7 +420,8 @@ STAGE PLANS:
               name: default.dest_g3
 
   Stage: Stage-8
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-9
     Map Reduce
@@ -510,7 +516,8 @@ STAGE PLANS:
               name: default.dest_h2
 
   Stage: Stage-11
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-4
     Move Operator
@@ -523,7 +530,8 @@ STAGE PLANS:
               name: default.dest_h3
 
   Stage: Stage-12
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM src
 INSERT OVERWRITE TABLE dest_g2 SELECT substr(src.key,1,1), count(DISTINCT substr(src.value,5)), concat(substr(src.key,1,1),sum(substr(src.value,5))), sum(DISTINCT substr(src.value, 5)), count(src.value) WHERE substr(src.key,1,1) >= 5 GROUP BY substr(src.key,1,1)

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/groupby_multi_single_reducer2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/groupby_multi_single_reducer2.q.out b/ql/src/test/results/clientpositive/groupby_multi_single_reducer2.q.out
index 012b211..daa047d 100644
--- a/ql/src/test/results/clientpositive/groupby_multi_single_reducer2.q.out
+++ b/ql/src/test/results/clientpositive/groupby_multi_single_reducer2.q.out
@@ -104,7 +104,8 @@ STAGE PLANS:
               name: default.dest_g2
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-1
     Move Operator
@@ -117,7 +118,8 @@ STAGE PLANS:
               name: default.dest_g3
 
   Stage: Stage-4
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM src
 INSERT OVERWRITE TABLE dest_g2 SELECT substr(src.key,1,1), count(DISTINCT src.key) WHERE substr(src.key,1,1) >= 5 GROUP BY substr(src.key,1,1)

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/groupby_multi_single_reducer3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/groupby_multi_single_reducer3.q.out b/ql/src/test/results/clientpositive/groupby_multi_single_reducer3.q.out
index 07997d2..496580b 100644
--- a/ql/src/test/results/clientpositive/groupby_multi_single_reducer3.q.out
+++ b/ql/src/test/results/clientpositive/groupby_multi_single_reducer3.q.out
@@ -116,7 +116,8 @@ STAGE PLANS:
               name: default.e1
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-1
     Move Operator
@@ -129,7 +130,8 @@ STAGE PLANS:
               name: default.e2
 
   Stage: Stage-4
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: from src
 insert overwrite table e1
@@ -282,7 +284,8 @@ STAGE PLANS:
               name: default.e1
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-1
     Move Operator
@@ -295,7 +298,8 @@ STAGE PLANS:
               name: default.e2
 
   Stage: Stage-4
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: from src
 insert overwrite table e1
@@ -448,7 +452,8 @@ STAGE PLANS:
               name: default.e1
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-1
     Move Operator
@@ -461,7 +466,8 @@ STAGE PLANS:
               name: default.e2
 
   Stage: Stage-4
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: from src
 insert overwrite table e1
@@ -614,7 +620,8 @@ STAGE PLANS:
               name: default.e1
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-1
     Move Operator
@@ -627,7 +634,8 @@ STAGE PLANS:
               name: default.e2
 
   Stage: Stage-4
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: from src
 insert overwrite table e1

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/groupby_position.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/groupby_position.q.out b/ql/src/test/results/clientpositive/groupby_position.q.out
index 12c5f3f..16924f3 100644
--- a/ql/src/test/results/clientpositive/groupby_position.q.out
+++ b/ql/src/test/results/clientpositive/groupby_position.q.out
@@ -99,7 +99,8 @@ STAGE PLANS:
               name: default.testtable1
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-4
     Map Reduce
@@ -141,7 +142,8 @@ STAGE PLANS:
               name: default.testtable2
 
   Stage: Stage-5
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM SRC
 INSERT OVERWRITE TABLE testTable1 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) WHERE SRC.key < 20 GROUP BY 1
@@ -289,7 +291,8 @@ STAGE PLANS:
               name: default.testtable1
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-4
     Map Reduce
@@ -331,7 +334,8 @@ STAGE PLANS:
               name: default.testtable2
 
   Stage: Stage-5
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM SRC
 INSERT OVERWRITE TABLE testTable1 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) WHERE SRC.key < 20 GROUP BY 1

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/groupby_ppr.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/groupby_ppr.q.out b/ql/src/test/results/clientpositive/groupby_ppr.q.out
index e645f5f..8a18187 100644
--- a/ql/src/test/results/clientpositive/groupby_ppr.q.out
+++ b/ql/src/test/results/clientpositive/groupby_ppr.q.out
@@ -221,7 +221,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
 PREHOOK: query: FROM srcpart src

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/groupby_ppr_multi_distinct.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/groupby_ppr_multi_distinct.q.out b/ql/src/test/results/clientpositive/groupby_ppr_multi_distinct.q.out
index f300095..6e4501d 100644
--- a/ql/src/test/results/clientpositive/groupby_ppr_multi_distinct.q.out
+++ b/ql/src/test/results/clientpositive/groupby_ppr_multi_distinct.q.out
@@ -221,7 +221,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
 PREHOOK: query: FROM srcpart src
@@ -482,7 +483,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
 PREHOOK: query: FROM srcpart src

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/groupby_rollup1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/groupby_rollup1.q.out b/ql/src/test/results/clientpositive/groupby_rollup1.q.out
index e050d0a..b7e93d9 100644
--- a/ql/src/test/results/clientpositive/groupby_rollup1.q.out
+++ b/ql/src/test/results/clientpositive/groupby_rollup1.q.out
@@ -497,7 +497,8 @@ STAGE PLANS:
               name: default.t2
 
   Stage: Stage-4
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-5
     Map Reduce
@@ -565,7 +566,8 @@ STAGE PLANS:
               name: default.t3
 
   Stage: Stage-7
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM T1
 INSERT OVERWRITE TABLE T2 SELECT key, val, count(1) group by key, val with rollup

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/groupby_sort_1_23.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/groupby_sort_1_23.q.out b/ql/src/test/results/clientpositive/groupby_sort_1_23.q.out
index 1f12c52..80ec75a 100644
--- a/ql/src/test/results/clientpositive/groupby_sort_1_23.q.out
+++ b/ql/src/test/results/clientpositive/groupby_sort_1_23.q.out
@@ -200,7 +200,8 @@ STAGE PLANS:
               name: default.outputtbl1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
   Stage: Stage-3
@@ -585,7 +586,8 @@ STAGE PLANS:
               name: default.outputtbl2
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
 PREHOOK: query: INSERT OVERWRITE TABLE outputTbl2
@@ -781,7 +783,8 @@ STAGE PLANS:
               name: default.outputtbl1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
   Stage: Stage-3
@@ -1155,7 +1158,8 @@ STAGE PLANS:
               name: default.outputtbl1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
   Stage: Stage-3
@@ -1537,7 +1541,8 @@ STAGE PLANS:
               name: default.outputtbl3
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
   Stage: Stage-3
@@ -1923,7 +1928,8 @@ STAGE PLANS:
               name: default.outputtbl4
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
 PREHOOK: query: INSERT OVERWRITE TABLE outputTbl4
@@ -2123,7 +2129,8 @@ STAGE PLANS:
               name: default.outputtbl3
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
 PREHOOK: query: INSERT OVERWRITE TABLE outputTbl3
@@ -2335,7 +2342,8 @@ STAGE PLANS:
               name: default.outputtbl1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
 PREHOOK: query: INSERT OVERWRITE TABLE outputTbl1
@@ -2595,7 +2603,8 @@ STAGE PLANS:
               name: default.outputtbl1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
   Stage: Stage-3
@@ -3168,7 +3177,8 @@ STAGE PLANS:
               name: default.outputtbl1
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
   Stage: Stage-4
@@ -3591,7 +3601,8 @@ STAGE PLANS:
               name: default.outputtbl1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
 PREHOOK: query: INSERT OVERWRITE TABLE outputTbl1
@@ -4099,7 +4110,8 @@ STAGE PLANS:
               name: default.outputtbl1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
 PREHOOK: query: INSERT OVERWRITE TABLE outputTbl1
@@ -4293,7 +4305,8 @@ STAGE PLANS:
               name: default.outputtbl4
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
   Stage: Stage-3
@@ -4678,7 +4691,8 @@ STAGE PLANS:
               name: default.outputtbl5
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
   Stage: Stage-3
@@ -5062,7 +5076,8 @@ STAGE PLANS:
               name: default.outputtbl4
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
   Stage: Stage-3
@@ -5453,7 +5468,8 @@ STAGE PLANS:
               name: default.outputtbl4
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
   Stage: Stage-3
@@ -5784,7 +5800,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-1
     Move Operator
@@ -5797,7 +5814,8 @@ STAGE PLANS:
               name: default.dest2
 
   Stage: Stage-4
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM T2
 INSERT OVERWRITE TABLE DEST1 SELECT key, count(1) GROUP BY key
@@ -5942,7 +5960,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-1
     Move Operator
@@ -5955,7 +5974,8 @@ STAGE PLANS:
               name: default.dest2
 
   Stage: Stage-4
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM (select key, val from T2 where key = 8) x
 INSERT OVERWRITE TABLE DEST1 SELECT key, count(1) GROUP BY key

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/groupby_sort_2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/groupby_sort_2.q.out b/ql/src/test/results/clientpositive/groupby_sort_2.q.out
index bb6273e..9d41436 100644
--- a/ql/src/test/results/clientpositive/groupby_sort_2.q.out
+++ b/ql/src/test/results/clientpositive/groupby_sort_2.q.out
@@ -102,7 +102,8 @@ STAGE PLANS:
               name: default.outputtbl1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: INSERT OVERWRITE TABLE outputTbl1
 SELECT val, count(1) FROM T1 GROUP BY val

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/groupby_sort_3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/groupby_sort_3.q.out b/ql/src/test/results/clientpositive/groupby_sort_3.q.out
index 2dae25d..67b5322 100644
--- a/ql/src/test/results/clientpositive/groupby_sort_3.q.out
+++ b/ql/src/test/results/clientpositive/groupby_sort_3.q.out
@@ -102,7 +102,8 @@ STAGE PLANS:
               name: default.outputtbl1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-3
     Map Reduce
@@ -237,7 +238,8 @@ STAGE PLANS:
               name: default.outputtbl2
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-3
     Map Reduce

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/groupby_sort_4.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/groupby_sort_4.q.out b/ql/src/test/results/clientpositive/groupby_sort_4.q.out
index 70e8ac7..0b26978 100644
--- a/ql/src/test/results/clientpositive/groupby_sort_4.q.out
+++ b/ql/src/test/results/clientpositive/groupby_sort_4.q.out
@@ -102,7 +102,8 @@ STAGE PLANS:
               name: default.outputtbl1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: INSERT OVERWRITE TABLE outputTbl1
 SELECT key, count(1) FROM T1 GROUP BY key
@@ -204,7 +205,8 @@ STAGE PLANS:
               name: default.outputtbl2
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: INSERT OVERWRITE TABLE outputTbl2
 SELECT key, val, count(1) FROM T1 GROUP BY key, val

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/groupby_sort_5.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/groupby_sort_5.q.out b/ql/src/test/results/clientpositive/groupby_sort_5.q.out
index db18928..41e1782 100644
--- a/ql/src/test/results/clientpositive/groupby_sort_5.q.out
+++ b/ql/src/test/results/clientpositive/groupby_sort_5.q.out
@@ -102,7 +102,8 @@ STAGE PLANS:
               name: default.outputtbl1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-3
     Map Reduce
@@ -265,7 +266,8 @@ STAGE PLANS:
               name: default.outputtbl1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-3
     Map Reduce
@@ -436,7 +438,8 @@ STAGE PLANS:
               name: default.outputtbl2
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: INSERT OVERWRITE TABLE outputTbl2
 SELECT key, count(1) FROM T1 GROUP BY key

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/groupby_sort_6.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/groupby_sort_6.q.out b/ql/src/test/results/clientpositive/groupby_sort_6.q.out
index 60019e7..a66ec97 100644
--- a/ql/src/test/results/clientpositive/groupby_sort_6.q.out
+++ b/ql/src/test/results/clientpositive/groupby_sort_6.q.out
@@ -132,7 +132,8 @@ STAGE PLANS:
               name: default.outputtbl1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
 PREHOOK: query: INSERT OVERWRITE TABLE outputTbl1
@@ -282,7 +283,8 @@ STAGE PLANS:
               name: default.outputtbl1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
 PREHOOK: query: INSERT OVERWRITE TABLE outputTbl1
@@ -470,7 +472,8 @@ STAGE PLANS:
               name: default.outputtbl1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
 PREHOOK: query: INSERT OVERWRITE TABLE outputTbl1

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/groupby_sort_7.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/groupby_sort_7.q.out b/ql/src/test/results/clientpositive/groupby_sort_7.q.out
index 9d535e2..c9746c3 100644
--- a/ql/src/test/results/clientpositive/groupby_sort_7.q.out
+++ b/ql/src/test/results/clientpositive/groupby_sort_7.q.out
@@ -105,7 +105,8 @@ STAGE PLANS:
               name: default.outputtbl1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-3
     Map Reduce

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/groupby_sort_skew_1_23.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/groupby_sort_skew_1_23.q.out b/ql/src/test/results/clientpositive/groupby_sort_skew_1_23.q.out
index fba8adb..3b3e227 100644
--- a/ql/src/test/results/clientpositive/groupby_sort_skew_1_23.q.out
+++ b/ql/src/test/results/clientpositive/groupby_sort_skew_1_23.q.out
@@ -200,7 +200,8 @@ STAGE PLANS:
               name: default.outputtbl1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
   Stage: Stage-3
@@ -654,7 +655,8 @@ STAGE PLANS:
               name: default.outputtbl2
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
 PREHOOK: query: INSERT OVERWRITE TABLE outputTbl2
@@ -850,7 +852,8 @@ STAGE PLANS:
               name: default.outputtbl1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
   Stage: Stage-3
@@ -1224,7 +1227,8 @@ STAGE PLANS:
               name: default.outputtbl1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
   Stage: Stage-3
@@ -1606,7 +1610,8 @@ STAGE PLANS:
               name: default.outputtbl3
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
   Stage: Stage-3
@@ -2061,7 +2066,8 @@ STAGE PLANS:
               name: default.outputtbl4
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
 PREHOOK: query: INSERT OVERWRITE TABLE outputTbl4
@@ -2330,7 +2336,8 @@ STAGE PLANS:
               name: default.outputtbl3
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
 PREHOOK: query: INSERT OVERWRITE TABLE outputTbl3
@@ -2611,7 +2618,8 @@ STAGE PLANS:
               name: default.outputtbl1
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
 PREHOOK: query: INSERT OVERWRITE TABLE outputTbl1
@@ -2871,7 +2879,8 @@ STAGE PLANS:
               name: default.outputtbl1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
   Stage: Stage-3
@@ -3513,7 +3522,8 @@ STAGE PLANS:
               name: default.outputtbl1
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
   Stage: Stage-4
@@ -3936,7 +3946,8 @@ STAGE PLANS:
               name: default.outputtbl1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
 PREHOOK: query: INSERT OVERWRITE TABLE outputTbl1
@@ -4582,7 +4593,8 @@ STAGE PLANS:
               name: default.outputtbl1
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
 PREHOOK: query: INSERT OVERWRITE TABLE outputTbl1
@@ -4776,7 +4788,8 @@ STAGE PLANS:
               name: default.outputtbl4
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
   Stage: Stage-3
@@ -5161,7 +5174,8 @@ STAGE PLANS:
               name: default.outputtbl5
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
   Stage: Stage-3
@@ -5545,7 +5559,8 @@ STAGE PLANS:
               name: default.outputtbl4
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
   Stage: Stage-3
@@ -5936,7 +5951,8 @@ STAGE PLANS:
               name: default.outputtbl4
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
   Stage: Stage-3
@@ -6292,7 +6308,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-4
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-1
     Move Operator
@@ -6305,7 +6322,8 @@ STAGE PLANS:
               name: default.dest2
 
   Stage: Stage-5
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM T2
 INSERT OVERWRITE TABLE DEST1 SELECT key, count(1) GROUP BY key
@@ -6475,7 +6493,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-4
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-1
     Move Operator
@@ -6488,7 +6507,8 @@ STAGE PLANS:
               name: default.dest2
 
   Stage: Stage-5
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM (select key, val from T2 where key = 8) x
 INSERT OVERWRITE TABLE DEST1 SELECT key, count(1) GROUP BY key

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/groupby_sort_test_1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/groupby_sort_test_1.q.out b/ql/src/test/results/clientpositive/groupby_sort_test_1.q.out
index d06cd7c..1723e75 100644
--- a/ql/src/test/results/clientpositive/groupby_sort_test_1.q.out
+++ b/ql/src/test/results/clientpositive/groupby_sort_test_1.q.out
@@ -102,7 +102,8 @@ STAGE PLANS:
               name: default.outputtbl1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-3
     Map Reduce

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/hll.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/hll.q.out b/ql/src/test/results/clientpositive/hll.q.out
index 3b83384..3b90b56 100644
--- a/ql/src/test/results/clientpositive/hll.q.out
+++ b/ql/src/test/results/clientpositive/hll.q.out
@@ -58,7 +58,8 @@ STAGE PLANS:
                 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
 
   Stage: Stage-1
-    Column Stats Work
+    Stats Work
+      Basic Stats Work:
       Column Stats Desc:
           Columns: key
           Column Types: int
@@ -67,10 +68,12 @@ STAGE PLANS:
 PREHOOK: query: analyze table n compute statistics for columns
 PREHOOK: type: QUERY
 PREHOOK: Input: default@n
+PREHOOK: Output: default@n
 #### A masked pattern was here ####
 POSTHOOK: query: analyze table n compute statistics for columns
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@n
+POSTHOOK: Output: default@n
 #### A masked pattern was here ####
 PREHOOK: query: desc formatted n key
 PREHOOK: type: DESCTABLE
@@ -151,7 +154,8 @@ STAGE PLANS:
                 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
 
   Stage: Stage-1
-    Column Stats Work
+    Stats Work
+      Basic Stats Work:
       Column Stats Desc:
           Columns: key
           Column Types: int
@@ -160,10 +164,12 @@ STAGE PLANS:
 PREHOOK: query: analyze table i compute statistics for columns
 PREHOOK: type: QUERY
 PREHOOK: Input: default@i
+PREHOOK: Output: default@i
 #### A masked pattern was here ####
 POSTHOOK: query: analyze table i compute statistics for columns
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@i
+POSTHOOK: Output: default@i
 #### A masked pattern was here ####
 PREHOOK: query: desc formatted i key
 PREHOOK: type: DESCTABLE
@@ -212,10 +218,12 @@ POSTHOOK: Lineage: i.key EXPRESSION [(src)src.FieldSchema(name:key, type:string,
 PREHOOK: query: analyze table i compute statistics for columns
 PREHOOK: type: QUERY
 PREHOOK: Input: default@i
+PREHOOK: Output: default@i
 #### A masked pattern was here ####
 POSTHOOK: query: analyze table i compute statistics for columns
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@i
+POSTHOOK: Output: default@i
 #### A masked pattern was here ####
 PREHOOK: query: desc formatted i key
 PREHOOK: type: DESCTABLE
@@ -264,10 +272,12 @@ POSTHOOK: Lineage: i.key EXPRESSION [(src)src.FieldSchema(name:key, type:string,
 PREHOOK: query: analyze table i compute statistics for columns
 PREHOOK: type: QUERY
 PREHOOK: Input: default@i
+PREHOOK: Output: default@i
 #### A masked pattern was here ####
 POSTHOOK: query: analyze table i compute statistics for columns
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@i
+POSTHOOK: Output: default@i
 #### A masked pattern was here ####
 PREHOOK: query: desc formatted i key
 PREHOOK: type: DESCTABLE
@@ -342,10 +352,12 @@ POSTHOOK: Lineage: i.key EXPRESSION [(values__tmp__table__5)values__tmp__table__
 PREHOOK: query: analyze table i compute statistics for columns
 PREHOOK: type: QUERY
 PREHOOK: Input: default@i
+PREHOOK: Output: default@i
 #### A masked pattern was here ####
 POSTHOOK: query: analyze table i compute statistics for columns
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@i
+POSTHOOK: Output: default@i
 #### A masked pattern was here ####
 PREHOOK: query: desc formatted i key
 PREHOOK: type: DESCTABLE

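[Editor's note, hedged: the hll.q.out hunks above show the merge this patch is named for. The former standalone "Column Stats Work" stage is folded into the unified "Stats Work" stage, which now carries both the basic-stats entry and the column-stats descriptor. Illustrative repro following the hll.q test; the plan fragment is verbatim from the new golden output:

    EXPLAIN ANALYZE TABLE n COMPUTE STATISTICS FOR COLUMNS;
    -- new plan fragment, one merged stage:
    --   Stage: Stage-1
    --     Stats Work
    --       Basic Stats Work:
    --       Column Stats Desc:
    --           Columns: key
    --           Column Types: int
]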
http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/implicit_cast_during_insert.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/implicit_cast_during_insert.q.out b/ql/src/test/results/clientpositive/implicit_cast_during_insert.q.out
index c4b9dc4..1fa9e94 100644
--- a/ql/src/test/results/clientpositive/implicit_cast_during_insert.q.out
+++ b/ql/src/test/results/clientpositive/implicit_cast_during_insert.q.out
@@ -70,7 +70,8 @@ STAGE PLANS:
               name: default.implicit_cast_during_insert
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: insert overwrite table implicit_cast_during_insert partition (p1)
   select key, value, key key1 from (select * from src where key in (0,1)) q

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/index_auto_update.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/index_auto_update.q.out b/ql/src/test/results/clientpositive/index_auto_update.q.out
index e7bc069..bc444cc 100644
--- a/ql/src/test/results/clientpositive/index_auto_update.q.out
+++ b/ql/src/test/results/clientpositive/index_auto_update.q.out
@@ -135,14 +135,16 @@ STAGE PLANS:
               name: default.default__temp_temp_index__
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-4
 
   Stage: Stage-5
 
   Stage: Stage-7
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-8
     Map Reduce

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/infer_bucket_sort_dyn_part.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/infer_bucket_sort_dyn_part.q.out b/ql/src/test/results/clientpositive/infer_bucket_sort_dyn_part.q.out
index 3d1443b..cba50a7 100644
--- a/ql/src/test/results/clientpositive/infer_bucket_sort_dyn_part.q.out
+++ b/ql/src/test/results/clientpositive/infer_bucket_sort_dyn_part.q.out
@@ -486,7 +486,8 @@ STAGE PLANS:
               name: default.test_table
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-3
     Merge File Operator

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/infer_bucket_sort_grouping_operators.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/infer_bucket_sort_grouping_operators.q.out b/ql/src/test/results/clientpositive/infer_bucket_sort_grouping_operators.q.out
index 7fec770..8ae7320 100644
--- a/ql/src/test/results/clientpositive/infer_bucket_sort_grouping_operators.q.out
+++ b/ql/src/test/results/clientpositive/infer_bucket_sort_grouping_operators.q.out
@@ -82,7 +82,8 @@ STAGE PLANS:
               name: default.test_table_out
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: SELECT key, value, count(1) FROM src GROUP BY ROLLUP (key, value)
 PREHOOK: type: QUERY
@@ -1510,7 +1511,8 @@ STAGE PLANS:
               name: default.test_table_out
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: INSERT OVERWRITE TABLE test_table_out PARTITION (part = '1') 
 SELECT key, value, count(1) FROM src GROUP BY key, value WITH CUBE
@@ -1684,7 +1686,8 @@ STAGE PLANS:
               name: default.test_table_out
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: INSERT OVERWRITE TABLE test_table_out PARTITION (part = '1') 
 SELECT key, value, count(1) FROM src GROUP BY key, value GROUPING SETS (key, value)

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/infer_bucket_sort_map_operators.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/infer_bucket_sort_map_operators.q.out b/ql/src/test/results/clientpositive/infer_bucket_sort_map_operators.q.out
index 041d053..f2218e3 100644
--- a/ql/src/test/results/clientpositive/infer_bucket_sort_map_operators.q.out
+++ b/ql/src/test/results/clientpositive/infer_bucket_sort_map_operators.q.out
@@ -114,7 +114,8 @@ STAGE PLANS:
               name: default.test_table_out
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-3
     Map Reduce
@@ -293,7 +294,8 @@ STAGE PLANS:
               name: default.test_table_out
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: INSERT OVERWRITE TABLE test_table_out PARTITION (part = '1') 
 SELECT a.key, a.value FROM (
@@ -422,7 +424,8 @@ STAGE PLANS:
               name: default.test_table_out
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-4
     Map Reduce
@@ -575,7 +578,8 @@ STAGE PLANS:
               name: default.test_table_out
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: INSERT OVERWRITE TABLE test_table_out PARTITION (part = '1')
 SELECT /*+ MAPJOIN(a) */ b.value, count(*) FROM test_table1 a JOIN test_table2 b ON a.key = b.key

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/infer_bucket_sort_num_buckets.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/infer_bucket_sort_num_buckets.q.out b/ql/src/test/results/clientpositive/infer_bucket_sort_num_buckets.q.out
index 5086a97..5c7659b 100644
--- a/ql/src/test/results/clientpositive/infer_bucket_sort_num_buckets.q.out
+++ b/ql/src/test/results/clientpositive/infer_bucket_sort_num_buckets.q.out
@@ -71,7 +71,8 @@ STAGE PLANS:
               name: default.test_table
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: INSERT OVERWRITE TABLE test_table PARTITION (ds = '2008-04-08', hr)
 SELECT key2, value, cast(hr as int) FROM

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/innerjoin.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/innerjoin.q.out b/ql/src/test/results/clientpositive/innerjoin.q.out
index 99b3d85..741c9fb 100644
--- a/ql/src/test/results/clientpositive/innerjoin.q.out
+++ b/ql/src/test/results/clientpositive/innerjoin.q.out
@@ -87,7 +87,8 @@ STAGE PLANS:
               name: default.dest_j1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM src src1 INNER JOIN src src2 ON (src1.key = src2.key)
 INSERT OVERWRITE TABLE dest_j1 SELECT src1.key, src2.value

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/input11.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/input11.q.out b/ql/src/test/results/clientpositive/input11.q.out
index bb22ee8..356cef1 100644
--- a/ql/src/test/results/clientpositive/input11.q.out
+++ b/ql/src/test/results/clientpositive/input11.q.out
@@ -67,7 +67,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-3
     Map Reduce

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/input11_limit.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/input11_limit.q.out b/ql/src/test/results/clientpositive/input11_limit.q.out
index 597554e..8fb7b63 100644
--- a/ql/src/test/results/clientpositive/input11_limit.q.out
+++ b/ql/src/test/results/clientpositive/input11_limit.q.out
@@ -73,7 +73,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM src
 INSERT OVERWRITE TABLE dest1 SELECT src.key, src.value WHERE src.key < 100 LIMIT 10

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/input12.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/input12.q.out b/ql/src/test/results/clientpositive/input12.q.out
index cd9d52e..8bb52c0 100644
--- a/ql/src/test/results/clientpositive/input12.q.out
+++ b/ql/src/test/results/clientpositive/input12.q.out
@@ -131,7 +131,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-4
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-5
     Map Reduce
@@ -183,7 +184,8 @@ STAGE PLANS:
               name: default.dest2
 
   Stage: Stage-10
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-11
     Map Reduce
@@ -238,7 +240,8 @@ STAGE PLANS:
               name: default.dest3
 
   Stage: Stage-16
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-17
     Map Reduce

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/input13.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/input13.q.out b/ql/src/test/results/clientpositive/input13.q.out
index c594ce3..9b1970b 100644
--- a/ql/src/test/results/clientpositive/input13.q.out
+++ b/ql/src/test/results/clientpositive/input13.q.out
@@ -153,7 +153,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-5
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-6
     Map Reduce
@@ -205,7 +206,8 @@ STAGE PLANS:
               name: default.dest2
 
   Stage: Stage-11
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-12
     Map Reduce
@@ -260,7 +262,8 @@ STAGE PLANS:
               name: default.dest3
 
   Stage: Stage-17
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-18
     Map Reduce

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/input14.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/input14.q.out b/ql/src/test/results/clientpositive/input14.q.out
index af04a98..d845526 100644
--- a/ql/src/test/results/clientpositive/input14.q.out
+++ b/ql/src/test/results/clientpositive/input14.q.out
@@ -81,7 +81,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM (
   FROM src

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/input14_limit.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/input14_limit.q.out b/ql/src/test/results/clientpositive/input14_limit.q.out
index 9870ad5..f62ad05 100644
--- a/ql/src/test/results/clientpositive/input14_limit.q.out
+++ b/ql/src/test/results/clientpositive/input14_limit.q.out
@@ -116,7 +116,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM (
   FROM src

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/input17.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/input17.q.out b/ql/src/test/results/clientpositive/input17.q.out
index 057a92d..316c9c6 100644
--- a/ql/src/test/results/clientpositive/input17.q.out
+++ b/ql/src/test/results/clientpositive/input17.q.out
@@ -78,7 +78,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM (
   FROM src_thrift

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/input18.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/input18.q.out b/ql/src/test/results/clientpositive/input18.q.out
index b341510..4eb75ff 100644
--- a/ql/src/test/results/clientpositive/input18.q.out
+++ b/ql/src/test/results/clientpositive/input18.q.out
@@ -81,7 +81,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM (
   FROM src

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/input1_limit.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/input1_limit.q.out b/ql/src/test/results/clientpositive/input1_limit.q.out
index 0ca1552..649e8fb 100644
--- a/ql/src/test/results/clientpositive/input1_limit.q.out
+++ b/ql/src/test/results/clientpositive/input1_limit.q.out
@@ -102,7 +102,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-4
     Map Reduce
@@ -145,7 +146,8 @@ STAGE PLANS:
               name: default.dest2
 
   Stage: Stage-5
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM src
 INSERT OVERWRITE TABLE dest1 SELECT src.key, src.value WHERE src.key < 100 LIMIT 10

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/input20.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/input20.q.out b/ql/src/test/results/clientpositive/input20.q.out
index 9587445..051e94e 100644
--- a/ql/src/test/results/clientpositive/input20.q.out
+++ b/ql/src/test/results/clientpositive/input20.q.out
@@ -94,7 +94,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM (
   FROM src

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/input30.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/input30.q.out b/ql/src/test/results/clientpositive/input30.q.out
index 478cea1..90c9732 100644
--- a/ql/src/test/results/clientpositive/input30.q.out
+++ b/ql/src/test/results/clientpositive/input30.q.out
@@ -76,7 +76,8 @@ STAGE PLANS:
               name: default.tst_dest30
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: insert overwrite table dest30
 select count(1) from src
@@ -159,7 +160,8 @@ STAGE PLANS:
               name: default.dest30
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: insert overwrite table dest30
 select count(1) from src

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/input31.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/input31.q.out b/ql/src/test/results/clientpositive/input31.q.out
index ea2c8f9..d3c2c6a 100644
--- a/ql/src/test/results/clientpositive/input31.q.out
+++ b/ql/src/test/results/clientpositive/input31.q.out
@@ -78,7 +78,8 @@ STAGE PLANS:
               name: default.tst_dest31
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: insert overwrite table dest31
 select count(1) from srcbucket

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/input32.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/input32.q.out b/ql/src/test/results/clientpositive/input32.q.out
index d3426a8..fa3b731 100644
--- a/ql/src/test/results/clientpositive/input32.q.out
+++ b/ql/src/test/results/clientpositive/input32.q.out
@@ -75,7 +75,8 @@ STAGE PLANS:
               name: default.tst_dest32
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: insert overwrite table dest32
 select count(1) from srcbucket

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/input33.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/input33.q.out b/ql/src/test/results/clientpositive/input33.q.out
index 4be5fc5..c24ac6e 100644
--- a/ql/src/test/results/clientpositive/input33.q.out
+++ b/ql/src/test/results/clientpositive/input33.q.out
@@ -94,7 +94,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM (
   FROM src

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/input34.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/input34.q.out b/ql/src/test/results/clientpositive/input34.q.out
index 72f66c3..841f0af 100644
--- a/ql/src/test/results/clientpositive/input34.q.out
+++ b/ql/src/test/results/clientpositive/input34.q.out
@@ -85,7 +85,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-3
     Map Reduce

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/input35.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/input35.q.out b/ql/src/test/results/clientpositive/input35.q.out
index 8b86991..76a014e 100644
--- a/ql/src/test/results/clientpositive/input35.q.out
+++ b/ql/src/test/results/clientpositive/input35.q.out
@@ -85,7 +85,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-3
     Map Reduce

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/input36.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/input36.q.out b/ql/src/test/results/clientpositive/input36.q.out
index 76921ad..f75f8fd 100644
--- a/ql/src/test/results/clientpositive/input36.q.out
+++ b/ql/src/test/results/clientpositive/input36.q.out
@@ -85,7 +85,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-3
     Map Reduce

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/input38.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/input38.q.out b/ql/src/test/results/clientpositive/input38.q.out
index 0c4e81d..41a0dbd 100644
--- a/ql/src/test/results/clientpositive/input38.q.out
+++ b/ql/src/test/results/clientpositive/input38.q.out
@@ -79,7 +79,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-3
     Map Reduce

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/input3_limit.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/input3_limit.q.out b/ql/src/test/results/clientpositive/input3_limit.q.out
index 07b84ed..d187ee2 100644
--- a/ql/src/test/results/clientpositive/input3_limit.q.out
+++ b/ql/src/test/results/clientpositive/input3_limit.q.out
@@ -107,7 +107,8 @@ STAGE PLANS:
               name: default.t2
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: INSERT OVERWRITE TABLE T2 SELECT * FROM (SELECT * FROM T1 DISTRIBUTE BY key) T ORDER BY key, value LIMIT 20
 PREHOOK: type: QUERY

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/input4.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/input4.q.out b/ql/src/test/results/clientpositive/input4.q.out
index bbcce4a..d9709ad 100644
--- a/ql/src/test/results/clientpositive/input4.q.out
+++ b/ql/src/test/results/clientpositive/input4.q.out
@@ -28,7 +28,8 @@ STAGE PLANS:
               name: default.input4
 
   Stage: Stage-1
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' INTO TABLE INPUT4
 PREHOOK: type: LOAD

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/input5.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/input5.q.out b/ql/src/test/results/clientpositive/input5.q.out
index a399528..75a827c 100644
--- a/ql/src/test/results/clientpositive/input5.q.out
+++ b/ql/src/test/results/clientpositive/input5.q.out
@@ -78,7 +78,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM (
   FROM src_thrift

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/input6.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/input6.q.out b/ql/src/test/results/clientpositive/input6.q.out
index 3d1a815..35e746a 100644
--- a/ql/src/test/results/clientpositive/input6.q.out
+++ b/ql/src/test/results/clientpositive/input6.q.out
@@ -67,7 +67,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-3
     Map Reduce

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/input7.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/input7.q.out b/ql/src/test/results/clientpositive/input7.q.out
index 0545b1f..281f6d4 100644
--- a/ql/src/test/results/clientpositive/input7.q.out
+++ b/ql/src/test/results/clientpositive/input7.q.out
@@ -64,7 +64,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-3
     Map Reduce

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/input8.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/input8.q.out b/ql/src/test/results/clientpositive/input8.q.out
index 0a930fe..97ccb0d 100644
--- a/ql/src/test/results/clientpositive/input8.q.out
+++ b/ql/src/test/results/clientpositive/input8.q.out
@@ -64,7 +64,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-3
     Map Reduce

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/input9.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/input9.q.out b/ql/src/test/results/clientpositive/input9.q.out
index af752e0..8df7fb7 100644
--- a/ql/src/test/results/clientpositive/input9.q.out
+++ b/ql/src/test/results/clientpositive/input9.q.out
@@ -67,7 +67,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-3
     Map Reduce

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/input_columnarserde.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/input_columnarserde.q.out b/ql/src/test/results/clientpositive/input_columnarserde.q.out
index afa0e28..95d0e6e 100644
--- a/ql/src/test/results/clientpositive/input_columnarserde.q.out
+++ b/ql/src/test/results/clientpositive/input_columnarserde.q.out
@@ -70,7 +70,8 @@ STAGE PLANS:
               name: default.input_columnarserde
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM src_thrift
 INSERT OVERWRITE TABLE input_columnarserde SELECT src_thrift.lint, src_thrift.lstring, src_thrift.mstringstring, src_thrift.aint, src_thrift.astring DISTRIBUTE BY 1

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/input_dynamicserde.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/input_dynamicserde.q.out b/ql/src/test/results/clientpositive/input_dynamicserde.q.out
index 30493be..a117873 100644
--- a/ql/src/test/results/clientpositive/input_dynamicserde.q.out
+++ b/ql/src/test/results/clientpositive/input_dynamicserde.q.out
@@ -76,7 +76,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-3
     Map Reduce

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/input_lazyserde.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/input_lazyserde.q.out b/ql/src/test/results/clientpositive/input_lazyserde.q.out
index 64dc6c1..473b5fe 100644
--- a/ql/src/test/results/clientpositive/input_lazyserde.q.out
+++ b/ql/src/test/results/clientpositive/input_lazyserde.q.out
@@ -76,7 +76,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM src_thrift
 INSERT OVERWRITE TABLE dest1 SELECT src_thrift.lint, src_thrift.lstring, src_thrift.mstringstring, src_thrift.aint, src_thrift.astring DISTRIBUTE BY 1

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/input_lazyserde2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/input_lazyserde2.q.out b/ql/src/test/results/clientpositive/input_lazyserde2.q.out
index 82c72db..d658334 100644
--- a/ql/src/test/results/clientpositive/input_lazyserde2.q.out
+++ b/ql/src/test/results/clientpositive/input_lazyserde2.q.out
@@ -76,7 +76,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM src_thrift
 INSERT OVERWRITE TABLE dest1 SELECT src_thrift.lint, src_thrift.lstring, src_thrift.mstringstring, src_thrift.aint, src_thrift.astring DISTRIBUTE BY 1

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/input_part1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/input_part1.q.out b/ql/src/test/results/clientpositive/input_part1.q.out
index a685446..9a9b192 100644
--- a/ql/src/test/results/clientpositive/input_part1.q.out
+++ b/ql/src/test/results/clientpositive/input_part1.q.out
@@ -164,7 +164,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
   Stage: Stage-3

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/input_part10.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/input_part10.q.out b/ql/src/test/results/clientpositive/input_part10.q.out
index 5ec24a7..e8ad15b 100644
--- a/ql/src/test/results/clientpositive/input_part10.q.out
+++ b/ql/src/test/results/clientpositive/input_part10.q.out
@@ -85,7 +85,8 @@ STAGE PLANS:
               name: default.part_special
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: INSERT OVERWRITE TABLE part_special PARTITION(ds='2008 04 08', ts = '10:11:12=455')
 SELECT 1, 2 FROM src LIMIT 1

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/input_part2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/input_part2.q.out b/ql/src/test/results/clientpositive/input_part2.q.out
index 0211d50..6942b23 100644
--- a/ql/src/test/results/clientpositive/input_part2.q.out
+++ b/ql/src/test/results/clientpositive/input_part2.q.out
@@ -270,7 +270,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
   Stage: Stage-4
@@ -491,7 +492,8 @@ STAGE PLANS:
               name: default.dest2
 
   Stage: Stage-9
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
   Stage: Stage-10

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/input_part5.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/input_part5.q.out b/ql/src/test/results/clientpositive/input_part5.q.out
index c6ae2fd..7da77fb 100644
--- a/ql/src/test/results/clientpositive/input_part5.q.out
+++ b/ql/src/test/results/clientpositive/input_part5.q.out
@@ -67,7 +67,8 @@ STAGE PLANS:
               name: default.tmptable
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-3
     Map Reduce

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/input_testsequencefile.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/input_testsequencefile.q.out b/ql/src/test/results/clientpositive/input_testsequencefile.q.out
index 60aaf83..c248d03 100644
--- a/ql/src/test/results/clientpositive/input_testsequencefile.q.out
+++ b/ql/src/test/results/clientpositive/input_testsequencefile.q.out
@@ -64,7 +64,8 @@ STAGE PLANS:
               name: default.dest4_sequencefile
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-3
     Map Reduce

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/input_testxpath.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/input_testxpath.q.out b/ql/src/test/results/clientpositive/input_testxpath.q.out
index e07628a..a68a500 100644
--- a/ql/src/test/results/clientpositive/input_testxpath.q.out
+++ b/ql/src/test/results/clientpositive/input_testxpath.q.out
@@ -64,7 +64,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-3
     Map Reduce

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/input_testxpath2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/input_testxpath2.q.out b/ql/src/test/results/clientpositive/input_testxpath2.q.out
index a0baccf..ed45157 100644
--- a/ql/src/test/results/clientpositive/input_testxpath2.q.out
+++ b/ql/src/test/results/clientpositive/input_testxpath2.q.out
@@ -67,7 +67,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-3
     Map Reduce

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/insert1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/insert1.q.out b/ql/src/test/results/clientpositive/insert1.q.out
index 5dffac7..aeb89eb 100644
--- a/ql/src/test/results/clientpositive/insert1.q.out
+++ b/ql/src/test/results/clientpositive/insert1.q.out
@@ -81,7 +81,8 @@ STAGE PLANS:
               name: default.insert1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-3
     Map Reduce
@@ -170,7 +171,8 @@ STAGE PLANS:
               name: default.insert1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-3
     Map Reduce
@@ -273,7 +275,8 @@ STAGE PLANS:
               name: x.insert1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-3
     Map Reduce
@@ -362,7 +365,8 @@ STAGE PLANS:
               name: default.insert1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-3
     Map Reduce
@@ -479,7 +483,8 @@ STAGE PLANS:
               name: default.insert1
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-4
     Map Reduce
@@ -531,7 +536,8 @@ STAGE PLANS:
               name: x.insert1
 
   Stage: Stage-9
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-10
     Map Reduce

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/insert1_overwrite_partitions.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/insert1_overwrite_partitions.q.out b/ql/src/test/results/clientpositive/insert1_overwrite_partitions.q.out
index 49c1269..a9378f8 100644
--- a/ql/src/test/results/clientpositive/insert1_overwrite_partitions.q.out
+++ b/ql/src/test/results/clientpositive/insert1_overwrite_partitions.q.out
@@ -90,7 +90,8 @@ STAGE PLANS:
               name: default.destintable
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: INSERT OVERWRITE TABLE destinTable PARTITION (ds='2011-11-11', hr='11') if not exists
 SELECT one,two FROM sourceTable WHERE ds='2011-11-11' AND hr='11' order by one desc, two desc limit 5
@@ -220,7 +221,8 @@ STAGE PLANS:
               name: default.destintable
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: INSERT OVERWRITE TABLE destinTable SELECT one,two FROM sourceTable WHERE ds='2011-11-11' AND hr='11' order by one desc, two desc limit 5
 PREHOOK: type: QUERY

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/insert2_overwrite_partitions.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/insert2_overwrite_partitions.q.out b/ql/src/test/results/clientpositive/insert2_overwrite_partitions.q.out
index b5f28d2..c8bfdc6 100644
--- a/ql/src/test/results/clientpositive/insert2_overwrite_partitions.q.out
+++ b/ql/src/test/results/clientpositive/insert2_overwrite_partitions.q.out
@@ -100,7 +100,8 @@ STAGE PLANS:
               name: db2.destintable
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: INSERT OVERWRITE TABLE db2.destinTable PARTITION (ds='2011-11-11')
 SELECT one,two FROM db1.sourceTable WHERE ds='2011-11-11' order by one desc, two desc limit 5
@@ -188,7 +189,8 @@ STAGE PLANS:
               name: db2.destintable
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: INSERT OVERWRITE TABLE db2.destinTable PARTITION (ds='2011-11-11')
 SELECT one,two FROM db1.sourceTable WHERE ds='2011-11-11' order by one desc, two desc limit 5

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/insert_into1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/insert_into1.q.out b/ql/src/test/results/clientpositive/insert_into1.q.out
index da863a7..3d1438a 100644
--- a/ql/src/test/results/clientpositive/insert_into1.q.out
+++ b/ql/src/test/results/clientpositive/insert_into1.q.out
@@ -68,7 +68,8 @@ STAGE PLANS:
               name: default.insert_into1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: INSERT INTO TABLE insert_into1 SELECT * from src ORDER BY key LIMIT 100
 PREHOOK: type: QUERY
@@ -176,7 +177,8 @@ STAGE PLANS:
               name: default.insert_into1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: INSERT INTO TABLE insert_into1 SELECT * FROM src ORDER BY key LIMIT 100
 PREHOOK: type: QUERY
@@ -284,7 +286,8 @@ STAGE PLANS:
               name: default.insert_into1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: INSERT OVERWRITE TABLE insert_into1 SELECT * FROM src ORDER BY key LIMIT 10
 PREHOOK: type: QUERY
@@ -389,7 +392,8 @@ STAGE PLANS:
               name: default.insert_into1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-3
     Map Reduce
@@ -486,7 +490,8 @@ STAGE PLANS:
               name: default.insert_into1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-3
     Map Reduce

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/insert_into2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/insert_into2.q.out b/ql/src/test/results/clientpositive/insert_into2.q.out
index 46fab7b..90b409c 100644
--- a/ql/src/test/results/clientpositive/insert_into2.q.out
+++ b/ql/src/test/results/clientpositive/insert_into2.q.out
@@ -74,7 +74,8 @@ STAGE PLANS:
               name: default.insert_into2
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: INSERT INTO TABLE insert_into2 PARTITION (ds='1') SELECT * FROM src order by key limit 100
 PREHOOK: type: QUERY
@@ -223,7 +224,8 @@ STAGE PLANS:
               name: default.insert_into2
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: INSERT OVERWRITE TABLE insert_into2 PARTITION (ds='2')
   SELECT * FROM src order by key LIMIT 100
@@ -341,7 +343,8 @@ STAGE PLANS:
               name: default.insert_into2
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: INSERT OVERWRITE TABLE insert_into2 PARTITION (ds='2')
   SELECT * FROM src order by key LIMIT 50

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/insert_into3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/insert_into3.q.out b/ql/src/test/results/clientpositive/insert_into3.q.out
index ae7523b..4bda7d9 100644
--- a/ql/src/test/results/clientpositive/insert_into3.q.out
+++ b/ql/src/test/results/clientpositive/insert_into3.q.out
@@ -94,7 +94,8 @@ STAGE PLANS:
               name: default.insert_into3a
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-4
     Map Reduce
@@ -137,7 +138,8 @@ STAGE PLANS:
               name: default.insert_into3b
 
   Stage: Stage-5
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM src INSERT INTO TABLE insert_into3a SELECT * ORDER BY key, value LIMIT 50
          INSERT INTO TABLE insert_into3b SELECT * ORDER BY key, value LIMIT 100
@@ -259,7 +261,8 @@ STAGE PLANS:
               name: default.insert_into3a
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-4
     Map Reduce
@@ -302,7 +305,8 @@ STAGE PLANS:
               name: default.insert_into3b
 
   Stage: Stage-5
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM src INSERT OVERWRITE TABLE insert_into3a SELECT * LIMIT 10
          INSERT INTO TABLE insert_into3b SELECT * LIMIT 10

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/insert_into4.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/insert_into4.q.out b/ql/src/test/results/clientpositive/insert_into4.q.out
index bb4e557..931ae3d 100644
--- a/ql/src/test/results/clientpositive/insert_into4.q.out
+++ b/ql/src/test/results/clientpositive/insert_into4.q.out
@@ -82,7 +82,8 @@ STAGE PLANS:
               name: default.insert_into4a
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: INSERT INTO TABLE insert_into4a SELECT * FROM src LIMIT 10
 PREHOOK: type: QUERY
@@ -167,7 +168,8 @@ STAGE PLANS:
               name: default.insert_into4a
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: INSERT INTO TABLE insert_into4a SELECT * FROM src LIMIT 10
 PREHOOK: type: QUERY
@@ -246,7 +248,8 @@ STAGE PLANS:
               name: default.insert_into4b
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-3
     Map Reduce

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/insert_into5.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/insert_into5.q.out b/ql/src/test/results/clientpositive/insert_into5.q.out
index 7b471f4..5e52e49 100644
--- a/ql/src/test/results/clientpositive/insert_into5.q.out
+++ b/ql/src/test/results/clientpositive/insert_into5.q.out
@@ -78,7 +78,8 @@ STAGE PLANS:
               name: default.insert_into5a
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: INSERT INTO TABLE insert_into5a SELECT 1, 'one' FROM src LIMIT 10
 PREHOOK: type: QUERY
@@ -157,7 +158,8 @@ STAGE PLANS:
               name: default.insert_into5a
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-3
     Map Reduce
@@ -270,7 +272,8 @@ STAGE PLANS:
               name: default.insert_into5b
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-3
     Map Reduce
@@ -385,7 +388,8 @@ STAGE PLANS:
               name: default.insert_into5b
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-3
     Map Reduce

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/insert_into6.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/insert_into6.q.out b/ql/src/test/results/clientpositive/insert_into6.q.out
index d93a167..964e259 100644
--- a/ql/src/test/results/clientpositive/insert_into6.q.out
+++ b/ql/src/test/results/clientpositive/insert_into6.q.out
@@ -86,7 +86,8 @@ STAGE PLANS:
               name: default.insert_into6a
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: INSERT INTO TABLE insert_into6a PARTITION (ds='1') SELECT * FROM src LIMIT 150
 PREHOOK: type: QUERY
@@ -183,7 +184,8 @@ STAGE PLANS:
               name: default.insert_into6b
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-3
     Map Reduce

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/insert_values_orig_table_use_metadata.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/insert_values_orig_table_use_metadata.q.out b/ql/src/test/results/clientpositive/insert_values_orig_table_use_metadata.q.out
index 33c785a..8e610c9 100644
--- a/ql/src/test/results/clientpositive/insert_values_orig_table_use_metadata.q.out
+++ b/ql/src/test/results/clientpositive/insert_values_orig_table_use_metadata.q.out
@@ -166,6 +166,7 @@ Retention:          	0
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
 Table Parameters:	 	 
+	COLUMN_STATS_ACCURATE	{}                  
 	numFiles            	1                   
 	numRows             	0                   
 	rawDataSize         	0                   
@@ -358,6 +359,7 @@ Retention:          	0
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
 Table Parameters:	 	 
+	COLUMN_STATS_ACCURATE	{}                  
 	numFiles            	1                   
 	numRows             	0                   
 	rawDataSize         	0                   
@@ -479,6 +481,7 @@ Retention:          	0
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
 Table Parameters:	 	 
+	COLUMN_STATS_ACCURATE	{}                  
 	numFiles            	2                   
 	numRows             	0                   
 	rawDataSize         	0                   
@@ -598,6 +601,7 @@ Retention:          	0
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
 Table Parameters:	 	 
+	COLUMN_STATS_ACCURATE	{}                  
 	numFiles            	3                   
 	numRows             	0                   
 	rawDataSize         	0                   
@@ -926,6 +930,7 @@ Database:           	default
 Table:              	sp                  	 
 #### A masked pattern was here ####
 Partition Parameters:	 	 
+	COLUMN_STATS_ACCURATE	{}                  
 	numFiles            	2                   
 	numRows             	0                   
 	rawDataSize         	0                   

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/insertoverwrite_bucket.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/insertoverwrite_bucket.q.out b/ql/src/test/results/clientpositive/insertoverwrite_bucket.q.out
index c2732c8..4c020fd 100644
--- a/ql/src/test/results/clientpositive/insertoverwrite_bucket.q.out
+++ b/ql/src/test/results/clientpositive/insertoverwrite_bucket.q.out
@@ -142,7 +142,8 @@ STAGE PLANS:
               name: default.temp1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: CREATE TABLE temp2
 (
@@ -221,7 +222,8 @@ STAGE PLANS:
               name: default.temp2
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: select * from bucketoutput1 a join bucketoutput2 b on (a.data=b.data)
 PREHOOK: type: QUERY

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/join14.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/join14.q.out b/ql/src/test/results/clientpositive/join14.q.out
index 66e42f1..10b4e1f 100644
--- a/ql/src/test/results/clientpositive/join14.q.out
+++ b/ql/src/test/results/clientpositive/join14.q.out
@@ -87,7 +87,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM src JOIN srcpart ON src.key = srcpart.key AND srcpart.ds = '2008-04-08' and src.key > 100
 INSERT OVERWRITE TABLE dest1 SELECT src.key, srcpart.value

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/join17.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/join17.q.out b/ql/src/test/results/clientpositive/join17.q.out
index f9edc79..6c3e5fe 100644
--- a/ql/src/test/results/clientpositive/join17.q.out
+++ b/ql/src/test/results/clientpositive/join17.q.out
@@ -192,7 +192,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
 PREHOOK: query: FROM src src1 JOIN src src2 ON (src1.key = src2.key)

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/join2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/join2.q.out b/ql/src/test/results/clientpositive/join2.q.out
index e3d26a2..b206742 100644
--- a/ql/src/test/results/clientpositive/join2.q.out
+++ b/ql/src/test/results/clientpositive/join2.q.out
@@ -129,7 +129,8 @@ STAGE PLANS:
               name: default.dest_j2
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM src src1 JOIN src src2 ON (src1.key = src2.key) JOIN src src3 ON (src1.key + src2.key = src3.key)
 INSERT OVERWRITE TABLE dest_j2 SELECT src1.key, src3.value

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/join25.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/join25.q.out b/ql/src/test/results/clientpositive/join25.q.out
index 5ad95c5..8ed420b 100644
--- a/ql/src/test/results/clientpositive/join25.q.out
+++ b/ql/src/test/results/clientpositive/join25.q.out
@@ -93,7 +93,8 @@ STAGE PLANS:
               name: default.dest_j1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: INSERT OVERWRITE TABLE dest_j1 
 SELECT /*+ MAPJOIN(x) */ x.key, x.value, y.value

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/join26.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/join26.q.out b/ql/src/test/results/clientpositive/join26.q.out
index b41fd8e..134aa52 100644
--- a/ql/src/test/results/clientpositive/join26.q.out
+++ b/ql/src/test/results/clientpositive/join26.q.out
@@ -313,7 +313,8 @@ STAGE PLANS:
               name: default.dest_j1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
 PREHOOK: query: INSERT OVERWRITE TABLE dest_j1

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/join27.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/join27.q.out b/ql/src/test/results/clientpositive/join27.q.out
index 8b43f3f..2e78d2a 100644
--- a/ql/src/test/results/clientpositive/join27.q.out
+++ b/ql/src/test/results/clientpositive/join27.q.out
@@ -93,7 +93,8 @@ STAGE PLANS:
               name: default.dest_j1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: INSERT OVERWRITE TABLE dest_j1 
 SELECT /*+ MAPJOIN(x) */ x.key, x.value, y.value

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/join28.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/join28.q.out b/ql/src/test/results/clientpositive/join28.q.out
index 309bdcd..c4b122f 100644
--- a/ql/src/test/results/clientpositive/join28.q.out
+++ b/ql/src/test/results/clientpositive/join28.q.out
@@ -129,7 +129,8 @@ STAGE PLANS:
               name: default.dest_j1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: INSERT OVERWRITE TABLE dest_j1 
 SELECT subq.key1, z.value

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/join29.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/join29.q.out b/ql/src/test/results/clientpositive/join29.q.out
index ef02385..d394eda 100644
--- a/ql/src/test/results/clientpositive/join29.q.out
+++ b/ql/src/test/results/clientpositive/join29.q.out
@@ -121,7 +121,8 @@ STAGE PLANS:
               name: default.dest_j1
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-9
     Map Reduce Local Work

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/join3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/join3.q.out b/ql/src/test/results/clientpositive/join3.q.out
index fb378f4..8c4cfe2 100644
--- a/ql/src/test/results/clientpositive/join3.q.out
+++ b/ql/src/test/results/clientpositive/join3.q.out
@@ -104,7 +104,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM src src1 JOIN src src2 ON (src1.key = src2.key) JOIN src src3 ON (src1.key = src3.key)
 INSERT OVERWRITE TABLE dest1 SELECT src1.key, src3.value

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/join30.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/join30.q.out b/ql/src/test/results/clientpositive/join30.q.out
index f06c70a..cdd5445 100644
--- a/ql/src/test/results/clientpositive/join30.q.out
+++ b/ql/src/test/results/clientpositive/join30.q.out
@@ -110,7 +110,8 @@ STAGE PLANS:
               name: default.dest_j1
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: INSERT OVERWRITE TABLE dest_j1 
 SELECT /*+ MAPJOIN(x) */ x.key, count(1) FROM src1 x JOIN src y ON (x.key = y.key) group by x.key

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/join31.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/join31.q.out b/ql/src/test/results/clientpositive/join31.q.out
index e055b5d..7b0cbdc 100644
--- a/ql/src/test/results/clientpositive/join31.q.out
+++ b/ql/src/test/results/clientpositive/join31.q.out
@@ -145,7 +145,8 @@ STAGE PLANS:
               name: default.dest_j1
 
   Stage: Stage-4
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: INSERT OVERWRITE TABLE dest_j1 
 SELECT subq1.key, count(1) as cnt

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/join32.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/join32.q.out b/ql/src/test/results/clientpositive/join32.q.out
index 176989c..30a5ba9 100644
--- a/ql/src/test/results/clientpositive/join32.q.out
+++ b/ql/src/test/results/clientpositive/join32.q.out
@@ -366,7 +366,8 @@ STAGE PLANS:
               name: default.dest_j1
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
 PREHOOK: query: INSERT OVERWRITE TABLE dest_j1


http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/autoColumnStats_5a.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/autoColumnStats_5a.q.out b/ql/src/test/results/clientpositive/autoColumnStats_5a.q.out
new file mode 100644
index 0000000..d97e1c6
--- /dev/null
+++ b/ql/src/test/results/clientpositive/autoColumnStats_5a.q.out
@@ -0,0 +1,997 @@
+PREHOOK: query: CREATE TABLE partitioned1(a INT, b STRING) PARTITIONED BY(part INT) STORED AS TEXTFILE
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@partitioned1
+POSTHOOK: query: CREATE TABLE partitioned1(a INT, b STRING) PARTITIONED BY(part INT) STORED AS TEXTFILE
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@partitioned1
+PREHOOK: query: explain extended 
+insert into table partitioned1 partition(part=1) values(1, 'original')
+PREHOOK: type: QUERY
+POSTHOOK: query: explain extended 
+insert into table partitioned1 partition(part=1) values(1, 'original')
+POSTHOOK: type: QUERY
+Explain
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-7 depends on stages: Stage-1 , consists of Stage-4, Stage-3, Stage-5
+  Stage-4
+  Stage-0 depends on stages: Stage-4, Stage-3, Stage-6
+  Stage-2 depends on stages: Stage-0
+  Stage-3
+  Stage-5
+  Stage-6 depends on stages: Stage-5
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: values__tmp__table__1
+            Statistics: Num rows: 1 Data size: 11 Basic stats: COMPLETE Column stats: NONE
+            GatherStats: false
+            Select Operator
+              expressions: UDFToInteger(tmp_values_col1) (type: int), tmp_values_col2 (type: string)
+              outputColumnNames: _col0, _col1
+              Statistics: Num rows: 1 Data size: 11 Basic stats: COMPLETE Column stats: NONE
+              File Output Operator
+                compressed: false
+                GlobalTableId: 1
+#### A masked pattern was here ####
+                NumFilesPerFileSink: 1
+                Static Partition Specification: part=1/
+                Statistics: Num rows: 1 Data size: 11 Basic stats: COMPLETE Column stats: NONE
+#### A masked pattern was here ####
+                table:
+                    input format: org.apache.hadoop.mapred.TextInputFormat
+                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                    properties:
+                      bucket_count -1
+                      column.name.delimiter ,
+                      columns a,b
+                      columns.comments 
+                      columns.types int:string
+#### A masked pattern was here ####
+                      name default.partitioned1
+                      partition_columns.types int
+                      serialization.ddl struct partitioned1 { i32 a, string b}
+                      serialization.format 1
+                      serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                    name: default.partitioned1
+                TotalFiles: 1
+                GatherStats: true
+                MultiFileSpray: false
+              Select Operator
+                expressions: _col0 (type: int), _col1 (type: string), UDFToInteger('1') (type: int)
+                outputColumnNames: a, b, part
+                Statistics: Num rows: 1 Data size: 11 Basic stats: COMPLETE Column stats: NONE
+                Group By Operator
+                  aggregations: compute_stats(a, 'hll'), compute_stats(b, 'hll')
+                  keys: part (type: int)
+                  mode: hash
+                  outputColumnNames: _col0, _col1, _col2
+                  Statistics: Num rows: 1 Data size: 11 Basic stats: COMPLETE Column stats: NONE
+                  Reduce Output Operator
+                    key expressions: _col0 (type: int)
+                    null sort order: a
+                    sort order: +
+                    Map-reduce partition columns: _col0 (type: int)
+                    Statistics: Num rows: 1 Data size: 11 Basic stats: COMPLETE Column stats: NONE
+                    tag: -1
+                    value expressions: _col1 (type: struct<columntype:string,min:bigint,max:bigint,countnulls:bigint,bitvector:binary>), _col2 (type: struct<columntype:string,maxlength:bigint,sumlength:bigint,count:bigint,countnulls:bigint,bitvector:binary>)
+                    auto parallelism: false
+      Path -> Alias:
+#### A masked pattern was here ####
+      Path -> Partition:
+#### A masked pattern was here ####
+          Partition
+            base file name: Values__Tmp__Table__1
+            input format: org.apache.hadoop.mapred.TextInputFormat
+            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+            properties:
+              bucket_count -1
+              column.name.delimiter ,
+              columns tmp_values_col1,tmp_values_col2
+              columns.comments 
+              columns.types string:string
+#### A masked pattern was here ####
+              name default.values__tmp__table__1
+              serialization.ddl struct values__tmp__table__1 { string tmp_values_col1, string tmp_values_col2}
+              serialization.format 1
+              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+          
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              properties:
+                bucket_count -1
+                column.name.delimiter ,
+                columns tmp_values_col1,tmp_values_col2
+                columns.comments 
+                columns.types string:string
+#### A masked pattern was here ####
+                name default.values__tmp__table__1
+                serialization.ddl struct values__tmp__table__1 { string tmp_values_col1, string tmp_values_col2}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.values__tmp__table__1
+            name: default.values__tmp__table__1
+      Truncated Path -> Alias:
+#### A masked pattern was here ####
+      Needs Tagging: false
+      Reduce Operator Tree:
+        Group By Operator
+          aggregations: compute_stats(VALUE._col0), compute_stats(VALUE._col1)
+          keys: KEY._col0 (type: int)
+          mode: mergepartial
+          outputColumnNames: _col0, _col1, _col2
+          Statistics: Num rows: 1 Data size: 11 Basic stats: COMPLETE Column stats: NONE
+          Select Operator
+            expressions: _col1 (type: struct<columntype:string,min:bigint,max:bigint,countnulls:bigint,numdistinctvalues:bigint,ndvbitvector:binary>), _col2 (type: struct<columntype:string,maxlength:bigint,avglength:double,countnulls:bigint,numdistinctvalues:bigint,ndvbitvector:binary>), _col0 (type: int)
+            outputColumnNames: _col0, _col1, _col2
+            Statistics: Num rows: 1 Data size: 11 Basic stats: COMPLETE Column stats: NONE
+            File Output Operator
+              compressed: false
+              GlobalTableId: 0
+#### A masked pattern was here ####
+              NumFilesPerFileSink: 1
+              Statistics: Num rows: 1 Data size: 11 Basic stats: COMPLETE Column stats: NONE
+#### A masked pattern was here ####
+              table:
+                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                  properties:
+                    columns _col0,_col1,_col2
+                    columns.types struct<columntype:string,min:bigint,max:bigint,countnulls:bigint,numdistinctvalues:bigint,ndvbitvector:binary>:struct<columntype:string,maxlength:bigint,avglength:double,countnulls:bigint,numdistinctvalues:bigint,ndvbitvector:binary>:int
+                    escape.delim \
+                    hive.serialization.extend.additional.nesting.levels true
+                    serialization.escape.crlf true
+                    serialization.format 1
+                    serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              TotalFiles: 1
+              GatherStats: false
+              MultiFileSpray: false
+
+  Stage: Stage-7
+    Conditional Operator
+
+  Stage: Stage-4
+    Move Operator
+      files:
+          hdfs directory: true
+#### A masked pattern was here ####
+
+  Stage: Stage-0
+    Move Operator
+      tables:
+          partition:
+            part 1
+          replace: false
+#### A masked pattern was here ####
+          table:
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              properties:
+                bucket_count -1
+                column.name.delimiter ,
+                columns a,b
+                columns.comments 
+                columns.types int:string
+#### A masked pattern was here ####
+                name default.partitioned1
+                partition_columns.types int
+                serialization.ddl struct partitioned1 { i32 a, string b}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.partitioned1
+
+  Stage: Stage-2
+    Stats Work
+      Basic Stats Work:
+#### A masked pattern was here ####
+      Column Stats Desc:
+          Columns: a, b
+          Column Types: int, string
+          Table: default.partitioned1
+          Is Table Level Stats: false
+
+  Stage: Stage-3
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            GatherStats: false
+            File Output Operator
+              compressed: false
+              GlobalTableId: 0
+#### A masked pattern was here ####
+              NumFilesPerFileSink: 1
+              table:
+                  input format: org.apache.hadoop.mapred.TextInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                  properties:
+                    bucket_count -1
+                    column.name.delimiter ,
+                    columns a,b
+                    columns.comments 
+                    columns.types int:string
+#### A masked pattern was here ####
+                    name default.partitioned1
+                    partition_columns part
+                    partition_columns.types int
+                    serialization.ddl struct partitioned1 { i32 a, string b}
+                    serialization.format 1
+                    serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                  name: default.partitioned1
+              TotalFiles: 1
+              GatherStats: false
+              MultiFileSpray: false
+      Path -> Alias:
+#### A masked pattern was here ####
+      Path -> Partition:
+#### A masked pattern was here ####
+          Partition
+            base file name: -ext-10002
+            input format: org.apache.hadoop.mapred.TextInputFormat
+            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+            properties:
+              bucket_count -1
+              column.name.delimiter ,
+              columns a,b
+              columns.comments 
+              columns.types int:string
+#### A masked pattern was here ####
+              name default.partitioned1
+              partition_columns.types int
+              serialization.ddl struct partitioned1 { i32 a, string b}
+              serialization.format 1
+              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+          
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              properties:
+                bucket_count -1
+                column.name.delimiter ,
+                columns a,b
+                columns.comments 
+                columns.types int:string
+#### A masked pattern was here ####
+                name default.partitioned1
+                partition_columns.types int
+                serialization.ddl struct partitioned1 { i32 a, string b}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.partitioned1
+            name: default.partitioned1
+      Truncated Path -> Alias:
+#### A masked pattern was here ####
+
+  Stage: Stage-5
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            GatherStats: false
+            File Output Operator
+              compressed: false
+              GlobalTableId: 0
+#### A masked pattern was here ####
+              NumFilesPerFileSink: 1
+              table:
+                  input format: org.apache.hadoop.mapred.TextInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                  properties:
+                    bucket_count -1
+                    column.name.delimiter ,
+                    columns a,b
+                    columns.comments 
+                    columns.types int:string
+#### A masked pattern was here ####
+                    name default.partitioned1
+                    partition_columns part
+                    partition_columns.types int
+                    serialization.ddl struct partitioned1 { i32 a, string b}
+                    serialization.format 1
+                    serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                  name: default.partitioned1
+              TotalFiles: 1
+              GatherStats: false
+              MultiFileSpray: false
+      Path -> Alias:
+#### A masked pattern was here ####
+      Path -> Partition:
+#### A masked pattern was here ####
+          Partition
+            base file name: -ext-10002
+            input format: org.apache.hadoop.mapred.TextInputFormat
+            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+            properties:
+              bucket_count -1
+              column.name.delimiter ,
+              columns a,b
+              columns.comments 
+              columns.types int:string
+#### A masked pattern was here ####
+              name default.partitioned1
+              partition_columns.types int
+              serialization.ddl struct partitioned1 { i32 a, string b}
+              serialization.format 1
+              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+          
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              properties:
+                bucket_count -1
+                column.name.delimiter ,
+                columns a,b
+                columns.comments 
+                columns.types int:string
+#### A masked pattern was here ####
+                name default.partitioned1
+                partition_columns.types int
+                serialization.ddl struct partitioned1 { i32 a, string b}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.partitioned1
+            name: default.partitioned1
+      Truncated Path -> Alias:
+#### A masked pattern was here ####
+
+  Stage: Stage-6
+    Move Operator
+      files:
+          hdfs directory: true
+#### A masked pattern was here ####
+
+PREHOOK: query: insert into table partitioned1 partition(part=1) values(1, 'original')
+PREHOOK: type: QUERY
+PREHOOK: Output: default@partitioned1@part=1
+POSTHOOK: query: insert into table partitioned1 partition(part=1) values(1, 'original')
+POSTHOOK: type: QUERY
+POSTHOOK: Output: default@partitioned1@part=1
+POSTHOOK: Lineage: partitioned1 PARTITION(part=1).a EXPRESSION [(values__tmp__table__2)values__tmp__table__2.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
+POSTHOOK: Lineage: partitioned1 PARTITION(part=1).b SIMPLE [(values__tmp__table__2)values__tmp__table__2.FieldSchema(name:tmp_values_col2, type:string, comment:), ]
+_col0	_col1
+PREHOOK: query: desc formatted partitioned1 partition(part=1)
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@partitioned1
+POSTHOOK: query: desc formatted partitioned1 partition(part=1)
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@partitioned1
+col_name	data_type	comment
+# col_name            	data_type           	comment             
+a                   	int                 	                    
+b                   	string              	                    
+	 	 
+# Partition Information	 	 
+# col_name            	data_type           	comment             
+part                	int                 	                    
+	 	 
+# Detailed Partition Information	 	 
+Partition Value:    	[1]                 	 
+Database:           	default             	 
+Table:              	partitioned1        	 
+#### A masked pattern was here ####
+Partition Parameters:	 	 
+	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"a\":\"true\",\"b\":\"true\"}}
+	numFiles            	1                   
+	numRows             	1                   
+	rawDataSize         	10                  
+	totalSize           	11                  
+#### A masked pattern was here ####
+	 	 
+# Storage Information	 	 
+SerDe Library:      	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe	 
+InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
+OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
+Compressed:         	No                  	 
+Num Buckets:        	-1                  	 
+Bucket Columns:     	[]                  	 
+Sort Columns:       	[]                  	 
+Storage Desc Params:	 	 
+	serialization.format	1                   
+PREHOOK: query: explain extended 
+insert into table partitioned1 partition(part=1) values(2, 'original'), (3, 'original'),(4, 'original')
+PREHOOK: type: QUERY
+POSTHOOK: query: explain extended 
+insert into table partitioned1 partition(part=1) values(2, 'original'), (3, 'original'),(4, 'original')
+POSTHOOK: type: QUERY
+Explain
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-7 depends on stages: Stage-1 , consists of Stage-4, Stage-3, Stage-5
+  Stage-4
+  Stage-0 depends on stages: Stage-4, Stage-3, Stage-6
+  Stage-2 depends on stages: Stage-0
+  Stage-3
+  Stage-5
+  Stage-6 depends on stages: Stage-5
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: values__tmp__table__3
+            Statistics: Num rows: 1 Data size: 33 Basic stats: COMPLETE Column stats: NONE
+            GatherStats: false
+            Select Operator
+              expressions: UDFToInteger(tmp_values_col1) (type: int), tmp_values_col2 (type: string)
+              outputColumnNames: _col0, _col1
+              Statistics: Num rows: 1 Data size: 33 Basic stats: COMPLETE Column stats: NONE
+              File Output Operator
+                compressed: false
+                GlobalTableId: 1
+#### A masked pattern was here ####
+                NumFilesPerFileSink: 1
+                Static Partition Specification: part=1/
+                Statistics: Num rows: 1 Data size: 33 Basic stats: COMPLETE Column stats: NONE
+#### A masked pattern was here ####
+                table:
+                    input format: org.apache.hadoop.mapred.TextInputFormat
+                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                    properties:
+                      bucket_count -1
+                      column.name.delimiter ,
+                      columns a,b
+                      columns.comments 
+                      columns.types int:string
+#### A masked pattern was here ####
+                      name default.partitioned1
+                      partition_columns.types int
+                      serialization.ddl struct partitioned1 { i32 a, string b}
+                      serialization.format 1
+                      serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                    name: default.partitioned1
+                TotalFiles: 1
+                GatherStats: true
+                MultiFileSpray: false
+              Select Operator
+                expressions: _col0 (type: int), _col1 (type: string), UDFToInteger('1') (type: int)
+                outputColumnNames: a, b, part
+                Statistics: Num rows: 1 Data size: 33 Basic stats: COMPLETE Column stats: NONE
+                Group By Operator
+                  aggregations: compute_stats(a, 'hll'), compute_stats(b, 'hll')
+                  keys: part (type: int)
+                  mode: hash
+                  outputColumnNames: _col0, _col1, _col2
+                  Statistics: Num rows: 1 Data size: 33 Basic stats: COMPLETE Column stats: NONE
+                  Reduce Output Operator
+                    key expressions: _col0 (type: int)
+                    null sort order: a
+                    sort order: +
+                    Map-reduce partition columns: _col0 (type: int)
+                    Statistics: Num rows: 1 Data size: 33 Basic stats: COMPLETE Column stats: NONE
+                    tag: -1
+                    value expressions: _col1 (type: struct<columntype:string,min:bigint,max:bigint,countnulls:bigint,bitvector:binary>), _col2 (type: struct<columntype:string,maxlength:bigint,sumlength:bigint,count:bigint,countnulls:bigint,bitvector:binary>)
+                    auto parallelism: false
+      Path -> Alias:
+#### A masked pattern was here ####
+      Path -> Partition:
+#### A masked pattern was here ####
+          Partition
+            base file name: Values__Tmp__Table__3
+            input format: org.apache.hadoop.mapred.TextInputFormat
+            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+            properties:
+              bucket_count -1
+              column.name.delimiter ,
+              columns tmp_values_col1,tmp_values_col2
+              columns.comments 
+              columns.types string:string
+#### A masked pattern was here ####
+              name default.values__tmp__table__3
+              serialization.ddl struct values__tmp__table__3 { string tmp_values_col1, string tmp_values_col2}
+              serialization.format 1
+              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+          
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              properties:
+                bucket_count -1
+                column.name.delimiter ,
+                columns tmp_values_col1,tmp_values_col2
+                columns.comments 
+                columns.types string:string
+#### A masked pattern was here ####
+                name default.values__tmp__table__3
+                serialization.ddl struct values__tmp__table__3 { string tmp_values_col1, string tmp_values_col2}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.values__tmp__table__3
+            name: default.values__tmp__table__3
+      Truncated Path -> Alias:
+#### A masked pattern was here ####
+      Needs Tagging: false
+      Reduce Operator Tree:
+        Group By Operator
+          aggregations: compute_stats(VALUE._col0), compute_stats(VALUE._col1)
+          keys: KEY._col0 (type: int)
+          mode: mergepartial
+          outputColumnNames: _col0, _col1, _col2
+          Statistics: Num rows: 1 Data size: 33 Basic stats: COMPLETE Column stats: NONE
+          Select Operator
+            expressions: _col1 (type: struct<columntype:string,min:bigint,max:bigint,countnulls:bigint,numdistinctvalues:bigint,ndvbitvector:binary>), _col2 (type: struct<columntype:string,maxlength:bigint,avglength:double,countnulls:bigint,numdistinctvalues:bigint,ndvbitvector:binary>), _col0 (type: int)
+            outputColumnNames: _col0, _col1, _col2
+            Statistics: Num rows: 1 Data size: 33 Basic stats: COMPLETE Column stats: NONE
+            File Output Operator
+              compressed: false
+              GlobalTableId: 0
+#### A masked pattern was here ####
+              NumFilesPerFileSink: 1
+              Statistics: Num rows: 1 Data size: 33 Basic stats: COMPLETE Column stats: NONE
+#### A masked pattern was here ####
+              table:
+                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                  properties:
+                    columns _col0,_col1,_col2
+                    columns.types struct<columntype:string,min:bigint,max:bigint,countnulls:bigint,numdistinctvalues:bigint,ndvbitvector:binary>:struct<columntype:string,maxlength:bigint,avglength:double,countnulls:bigint,numdistinctvalues:bigint,ndvbitvector:binary>:int
+                    escape.delim \
+                    hive.serialization.extend.additional.nesting.levels true
+                    serialization.escape.crlf true
+                    serialization.format 1
+                    serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              TotalFiles: 1
+              GatherStats: false
+              MultiFileSpray: false
+
+  Stage: Stage-7
+    Conditional Operator
+
+  Stage: Stage-4
+    Move Operator
+      files:
+          hdfs directory: true
+#### A masked pattern was here ####
+
+  Stage: Stage-0
+    Move Operator
+      tables:
+          partition:
+            part 1
+          replace: false
+#### A masked pattern was here ####
+          table:
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              properties:
+                bucket_count -1
+                column.name.delimiter ,
+                columns a,b
+                columns.comments 
+                columns.types int:string
+#### A masked pattern was here ####
+                name default.partitioned1
+                partition_columns.types int
+                serialization.ddl struct partitioned1 { i32 a, string b}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.partitioned1
+
+  Stage: Stage-2
+    Stats Work
+      Basic Stats Work:
+#### A masked pattern was here ####
+      Column Stats Desc:
+          Columns: a, b
+          Column Types: int, string
+          Table: default.partitioned1
+          Is Table Level Stats: false
+
+  Stage: Stage-3
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            GatherStats: false
+            File Output Operator
+              compressed: false
+              GlobalTableId: 0
+#### A masked pattern was here ####
+              NumFilesPerFileSink: 1
+              table:
+                  input format: org.apache.hadoop.mapred.TextInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                  properties:
+                    bucket_count -1
+                    column.name.delimiter ,
+                    columns a,b
+                    columns.comments 
+                    columns.types int:string
+#### A masked pattern was here ####
+                    name default.partitioned1
+                    partition_columns part
+                    partition_columns.types int
+                    serialization.ddl struct partitioned1 { i32 a, string b}
+                    serialization.format 1
+                    serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                  name: default.partitioned1
+              TotalFiles: 1
+              GatherStats: false
+              MultiFileSpray: false
+      Path -> Alias:
+#### A masked pattern was here ####
+      Path -> Partition:
+#### A masked pattern was here ####
+          Partition
+            base file name: -ext-10002
+            input format: org.apache.hadoop.mapred.TextInputFormat
+            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+            properties:
+              bucket_count -1
+              column.name.delimiter ,
+              columns a,b
+              columns.comments 
+              columns.types int:string
+#### A masked pattern was here ####
+              name default.partitioned1
+              partition_columns.types int
+              serialization.ddl struct partitioned1 { i32 a, string b}
+              serialization.format 1
+              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+          
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              properties:
+                bucket_count -1
+                column.name.delimiter ,
+                columns a,b
+                columns.comments 
+                columns.types int:string
+#### A masked pattern was here ####
+                name default.partitioned1
+                partition_columns.types int
+                serialization.ddl struct partitioned1 { i32 a, string b}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.partitioned1
+            name: default.partitioned1
+      Truncated Path -> Alias:
+#### A masked pattern was here ####
+
+  Stage: Stage-5
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            GatherStats: false
+            File Output Operator
+              compressed: false
+              GlobalTableId: 0
+#### A masked pattern was here ####
+              NumFilesPerFileSink: 1
+              table:
+                  input format: org.apache.hadoop.mapred.TextInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                  properties:
+                    bucket_count -1
+                    column.name.delimiter ,
+                    columns a,b
+                    columns.comments 
+                    columns.types int:string
+#### A masked pattern was here ####
+                    name default.partitioned1
+                    partition_columns part
+                    partition_columns.types int
+                    serialization.ddl struct partitioned1 { i32 a, string b}
+                    serialization.format 1
+                    serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                  name: default.partitioned1
+              TotalFiles: 1
+              GatherStats: false
+              MultiFileSpray: false
+      Path -> Alias:
+#### A masked pattern was here ####
+      Path -> Partition:
+#### A masked pattern was here ####
+          Partition
+            base file name: -ext-10002
+            input format: org.apache.hadoop.mapred.TextInputFormat
+            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+            properties:
+              bucket_count -1
+              column.name.delimiter ,
+              columns a,b
+              columns.comments 
+              columns.types int:string
+#### A masked pattern was here ####
+              name default.partitioned1
+              partition_columns.types int
+              serialization.ddl struct partitioned1 { i32 a, string b}
+              serialization.format 1
+              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+          
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              properties:
+                bucket_count -1
+                column.name.delimiter ,
+                columns a,b
+                columns.comments 
+                columns.types int:string
+#### A masked pattern was here ####
+                name default.partitioned1
+                partition_columns.types int
+                serialization.ddl struct partitioned1 { i32 a, string b}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.partitioned1
+            name: default.partitioned1
+      Truncated Path -> Alias:
+#### A masked pattern was here ####
+
+  Stage: Stage-6
+    Move Operator
+      files:
+          hdfs directory: true
+#### A masked pattern was here ####
+
+PREHOOK: query: insert into table partitioned1 partition(part=1) values(2, 'original'), (3, 'original'),(4, 'original')
+PREHOOK: type: QUERY
+PREHOOK: Output: default@partitioned1@part=1
+POSTHOOK: query: insert into table partitioned1 partition(part=1) values(2, 'original'), (3, 'original'),(4, 'original')
+POSTHOOK: type: QUERY
+POSTHOOK: Output: default@partitioned1@part=1
+POSTHOOK: Lineage: partitioned1 PARTITION(part=1).a EXPRESSION [(values__tmp__table__4)values__tmp__table__4.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
+POSTHOOK: Lineage: partitioned1 PARTITION(part=1).b SIMPLE [(values__tmp__table__4)values__tmp__table__4.FieldSchema(name:tmp_values_col2, type:string, comment:), ]
+_col0	_col1
+PREHOOK: query: explain insert into table partitioned1 partition(part=1) values(1, 'original'),(2, 'original'), (3, 'original'),(4, 'original')
+PREHOOK: type: QUERY
+POSTHOOK: query: explain insert into table partitioned1 partition(part=1) values(1, 'original'),(2, 'original'), (3, 'original'),(4, 'original')
+POSTHOOK: type: QUERY
+Explain
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-7 depends on stages: Stage-1 , consists of Stage-4, Stage-3, Stage-5
+  Stage-4
+  Stage-0 depends on stages: Stage-4, Stage-3, Stage-6
+  Stage-2 depends on stages: Stage-0
+  Stage-3
+  Stage-5
+  Stage-6 depends on stages: Stage-5
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: values__tmp__table__5
+            Statistics: Num rows: 1 Data size: 44 Basic stats: COMPLETE Column stats: NONE
+            Select Operator
+              expressions: UDFToInteger(tmp_values_col1) (type: int), tmp_values_col2 (type: string)
+              outputColumnNames: _col0, _col1
+              Statistics: Num rows: 1 Data size: 44 Basic stats: COMPLETE Column stats: NONE
+              File Output Operator
+                compressed: false
+                Statistics: Num rows: 1 Data size: 44 Basic stats: COMPLETE Column stats: NONE
+                table:
+                    input format: org.apache.hadoop.mapred.TextInputFormat
+                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                    name: default.partitioned1
+              Select Operator
+                expressions: _col0 (type: int), _col1 (type: string), UDFToInteger('1') (type: int)
+                outputColumnNames: a, b, part
+                Statistics: Num rows: 1 Data size: 44 Basic stats: COMPLETE Column stats: NONE
+                Group By Operator
+                  aggregations: compute_stats(a, 'hll'), compute_stats(b, 'hll')
+                  keys: part (type: int)
+                  mode: hash
+                  outputColumnNames: _col0, _col1, _col2
+                  Statistics: Num rows: 1 Data size: 44 Basic stats: COMPLETE Column stats: NONE
+                  Reduce Output Operator
+                    key expressions: _col0 (type: int)
+                    sort order: +
+                    Map-reduce partition columns: _col0 (type: int)
+                    Statistics: Num rows: 1 Data size: 44 Basic stats: COMPLETE Column stats: NONE
+                    value expressions: _col1 (type: struct<columntype:string,min:bigint,max:bigint,countnulls:bigint,bitvector:binary>), _col2 (type: struct<columntype:string,maxlength:bigint,sumlength:bigint,count:bigint,countnulls:bigint,bitvector:binary>)
+      Reduce Operator Tree:
+        Group By Operator
+          aggregations: compute_stats(VALUE._col0), compute_stats(VALUE._col1)
+          keys: KEY._col0 (type: int)
+          mode: mergepartial
+          outputColumnNames: _col0, _col1, _col2
+          Statistics: Num rows: 1 Data size: 44 Basic stats: COMPLETE Column stats: NONE
+          Select Operator
+            expressions: _col1 (type: struct<columntype:string,min:bigint,max:bigint,countnulls:bigint,numdistinctvalues:bigint,ndvbitvector:binary>), _col2 (type: struct<columntype:string,maxlength:bigint,avglength:double,countnulls:bigint,numdistinctvalues:bigint,ndvbitvector:binary>), _col0 (type: int)
+            outputColumnNames: _col0, _col1, _col2
+            Statistics: Num rows: 1 Data size: 44 Basic stats: COMPLETE Column stats: NONE
+            File Output Operator
+              compressed: false
+              Statistics: Num rows: 1 Data size: 44 Basic stats: COMPLETE Column stats: NONE
+              table:
+                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-7
+    Conditional Operator
+
+  Stage: Stage-4
+    Move Operator
+      files:
+          hdfs directory: true
+#### A masked pattern was here ####
+
+  Stage: Stage-0
+    Move Operator
+      tables:
+          partition:
+            part 1
+          replace: false
+          table:
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.partitioned1
+
+  Stage: Stage-2
+    Stats Work
+      Basic Stats Work:
+      Column Stats Desc:
+          Columns: a, b
+          Column Types: int, string
+          Table: default.partitioned1
+
+  Stage: Stage-3
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            File Output Operator
+              compressed: false
+              table:
+                  input format: org.apache.hadoop.mapred.TextInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                  name: default.partitioned1
+
+  Stage: Stage-5
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            File Output Operator
+              compressed: false
+              table:
+                  input format: org.apache.hadoop.mapred.TextInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                  name: default.partitioned1
+
+  Stage: Stage-6
+    Move Operator
+      files:
+          hdfs directory: true
+#### A masked pattern was here ####
+
+PREHOOK: query: desc formatted partitioned1
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@partitioned1
+POSTHOOK: query: desc formatted partitioned1
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@partitioned1
+col_name	data_type	comment
+# col_name            	data_type           	comment             
+a                   	int                 	                    
+b                   	string              	                    
+	 	 
+# Partition Information	 	 
+# col_name            	data_type           	comment             
+part                	int                 	                    
+	 	 
+# Detailed Table Information	 	 
+Database:           	default             	 
+#### A masked pattern was here ####
+Retention:          	0                   	 
+#### A masked pattern was here ####
+Table Type:         	MANAGED_TABLE       	 
+Table Parameters:	 	 
+	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\"}
+	numFiles            	2                   
+	numPartitions       	1                   
+	numRows             	4                   
+	rawDataSize         	40                  
+	totalSize           	44                  
+#### A masked pattern was here ####
+	 	 
+# Storage Information	 	 
+SerDe Library:      	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe	 
+InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
+OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
+Compressed:         	No                  	 
+Num Buckets:        	-1                  	 
+Bucket Columns:     	[]                  	 
+Sort Columns:       	[]                  	 
+Storage Desc Params:	 	 
+	serialization.format	1                   
+PREHOOK: query: desc formatted partitioned1 partition(part=1)
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@partitioned1
+POSTHOOK: query: desc formatted partitioned1 partition(part=1)
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@partitioned1
+col_name	data_type	comment
+# col_name            	data_type           	comment             
+a                   	int                 	                    
+b                   	string              	                    
+	 	 
+# Partition Information	 	 
+# col_name            	data_type           	comment             
+part                	int                 	                    
+	 	 
+# Detailed Partition Information	 	 
+Partition Value:    	[1]                 	 
+Database:           	default             	 
+Table:              	partitioned1        	 
+#### A masked pattern was here ####
+Partition Parameters:	 	 
+	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"a\":\"true\",\"b\":\"true\"}}
+	numFiles            	2                   
+	numRows             	4                   
+	rawDataSize         	40                  
+	totalSize           	44                  
+#### A masked pattern was here ####
+	 	 
+# Storage Information	 	 
+SerDe Library:      	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe	 
+InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
+OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
+Compressed:         	No                  	 
+Num Buckets:        	-1                  	 
+Bucket Columns:     	[]                  	 
+Sort Columns:       	[]                  	 
+Storage Desc Params:	 	 
+	serialization.format	1                   
+PREHOOK: query: desc formatted partitioned1 partition(part=1) a
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@partitioned1
+POSTHOOK: query: desc formatted partitioned1 partition(part=1) a
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@partitioned1
+col_name	data_type	min	max	num_nulls	distinct_count	avg_col_len	max_col_len	num_trues	num_falses	bitvector	comment
+col_name            	a                   	 	 	 	 	 	 	 	 	 	 
+data_type           	int                 	 	 	 	 	 	 	 	 	 	 
+min                 	1                   	 	 	 	 	 	 	 	 	 	 
+max                 	4                   	 	 	 	 	 	 	 	 	 	 
+num_nulls           	0                   	 	 	 	 	 	 	 	 	 	 
+distinct_count      	4                   	 	 	 	 	 	 	 	 	 	 
+avg_col_len         	                    	 	 	 	 	 	 	 	 	 	 
+max_col_len         	                    	 	 	 	 	 	 	 	 	 	 
+num_trues           	                    	 	 	 	 	 	 	 	 	 	 
+num_falses          	                    	 	 	 	 	 	 	 	 	 	 
+bitVector           	HL                  	 	 	 	 	 	 	 	 	 	 
+comment             	from deserializer   	 	 	 	 	 	 	 	 	 	 
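
The plans above show how the merged task wires column statistics into the
insert itself: a Select Operator re-projects the written columns plus the
static partition value, a Group By Operator aggregates them with
compute_stats(col, 'hll') (the HyperLogLog NDV estimator, which is also what
the "bitVector HL" row in the DESC FORMATTED output reflects), and Stage-2
persists the result as a single Stats Work carrying both Basic Stats Work and
a Column Stats Desc. A minimal hand check, using only statements that already
appear in this golden file:

    -- column-level stats for column a of partition part=1
    desc formatted partitioned1 partition(part=1) a;
    -- per the output above: min 1, max 4, num_nulls 0, distinct_count 4,
    -- bitVector HL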

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/autoColumnStats_6.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/autoColumnStats_6.q.out b/ql/src/test/results/clientpositive/autoColumnStats_6.q.out
index c4ab489..70788fd 100644
--- a/ql/src/test/results/clientpositive/autoColumnStats_6.q.out
+++ b/ql/src/test/results/clientpositive/autoColumnStats_6.q.out
@@ -30,7 +30,6 @@ STAGE DEPENDENCIES:
   Stage-4
   Stage-0 depends on stages: Stage-4, Stage-3, Stage-6
   Stage-2 depends on stages: Stage-0
-  Stage-8 depends on stages: Stage-2
   Stage-3
   Stage-5
   Stage-6 depends on stages: Stage-5
@@ -113,10 +112,8 @@ STAGE PLANS:
               name: default.orcfile_merge2a
 
   Stage: Stage-2
-    Stats-Aggr Operator
-
-  Stage: Stage-8
-    Column Stats Work
+    Stats Work
+      Basic Stats Work:
       Column Stats Desc:
           Columns: key, value
           Column Types: int, string
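
The autoColumnStats_6 hunk above is the shape of the change throughout this
patch: the separate Stats-Aggr Operator and Column Stats Work stages collapse
into one Stats Work stage that carries both Basic Stats Work and the Column
Stats Desc. A rough sketch for reproducing such a plan locally
(hive.stats.autogather and hive.stats.column.autogather are standard Hive
settings; the table here is illustrative, not from the patch):

    set hive.stats.autogather=true;
    set hive.stats.column.autogather=true;
    create table t1 (key int, value string);
    explain insert into table t1 values (1, 'a');
    -- the plan should now end in a single stage of the form:
    --   Stage: Stage-2
    --     Stats Work
    --       Basic Stats Work:
    --       Column Stats Desc:
    --           Columns: key, value
    --           Column Types: int, string
    --           Table: default.t1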

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/autoColumnStats_7.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/autoColumnStats_7.q.out b/ql/src/test/results/clientpositive/autoColumnStats_7.q.out
index 2dc9fc2..acea69e 100644
--- a/ql/src/test/results/clientpositive/autoColumnStats_7.q.out
+++ b/ql/src/test/results/clientpositive/autoColumnStats_7.q.out
@@ -26,8 +26,7 @@ STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-2 depends on stages: Stage-1
   Stage-0 depends on stages: Stage-2
-  Stage-3 depends on stages: Stage-0
-  Stage-6 depends on stages: Stage-3, Stage-5
+  Stage-3 depends on stages: Stage-0, Stage-5
   Stage-4 depends on stages: Stage-2
   Stage-5 depends on stages: Stage-4
 
@@ -112,10 +111,8 @@ STAGE PLANS:
               name: default.dest_g2
 
   Stage: Stage-3
-    Stats-Aggr Operator
-
-  Stage: Stage-6
-    Column Stats Work
+    Stats Work
+      Basic Stats Work:
       Column Stats Desc:
           Columns: key, c1, c2
           Column Types: string, int, string

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/autoColumnStats_8.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/autoColumnStats_8.q.out b/ql/src/test/results/clientpositive/autoColumnStats_8.q.out
index 9d22aeb..1d7e966 100644
--- a/ql/src/test/results/clientpositive/autoColumnStats_8.q.out
+++ b/ql/src/test/results/clientpositive/autoColumnStats_8.q.out
@@ -47,10 +47,8 @@ STAGE DEPENDENCIES:
   Stage-2 is a root stage
   Stage-0 depends on stages: Stage-2
   Stage-3 depends on stages: Stage-0
-  Stage-6 depends on stages: Stage-3, Stage-4, Stage-5
-  Stage-7 depends on stages: Stage-3, Stage-4, Stage-5
   Stage-1 depends on stages: Stage-2
-  Stage-4 depends on stages: Stage-1
+  Stage-4 depends on stages: Stage-1, Stage-5
   Stage-5 depends on stages: Stage-2
 
 STAGE PLANS:
@@ -447,25 +445,10 @@ STAGE PLANS:
               name: default.nzhang_part8
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
-  Stage: Stage-6
-    Column Stats Work
-      Column Stats Desc:
-          Columns: key, value
-          Column Types: string, string
-          Table: default.nzhang_part8
-          Is Table Level Stats: false
-
-  Stage: Stage-7
-    Column Stats Work
-      Column Stats Desc:
-          Columns: key, value
-          Column Types: string, string
-          Table: default.nzhang_part8
-          Is Table Level Stats: false
-
   Stage: Stage-1
     Move Operator
       tables:
@@ -495,8 +478,14 @@ STAGE PLANS:
               name: default.nzhang_part8
 
   Stage: Stage-4
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
+      Column Stats Desc:
+          Columns: key, value
+          Column Types: string, string
+          Table: default.nzhang_part8
+          Is Table Level Stats: false
 
   Stage: Stage-5
     Map Reduce
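
In the autoColumnStats_8 hunk above, the merged Stage-4 keeps
"Is Table Level Stats: false": the column stats are written per partition of
default.nzhang_part8 rather than at table level, and the two duplicate Column
Stats Work stages (old Stage-6 and Stage-7) fold into it. A sketch of
inspecting one partition's column stats afterwards (the partition spec here is
an assumption; nzhang_part8 is loaded via dynamic partitioning in that test):

    desc formatted nzhang_part8 partition(ds='2008-04-08', hr='11') key;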

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/autoColumnStats_9.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/autoColumnStats_9.q.out b/ql/src/test/results/clientpositive/autoColumnStats_9.q.out
index c6d12fa..0dfcf04 100644
--- a/ql/src/test/results/clientpositive/autoColumnStats_9.q.out
+++ b/ql/src/test/results/clientpositive/autoColumnStats_9.q.out
@@ -20,8 +20,7 @@ STAGE DEPENDENCIES:
   Stage-7
   Stage-5 depends on stages: Stage-7
   Stage-0 depends on stages: Stage-5
-  Stage-2 depends on stages: Stage-0
-  Stage-8 depends on stages: Stage-2, Stage-3
+  Stage-2 depends on stages: Stage-0, Stage-3
   Stage-3 depends on stages: Stage-5
 
 STAGE PLANS:
@@ -166,10 +165,8 @@ STAGE PLANS:
               name: default.dest_j1
 
   Stage: Stage-2
-    Stats-Aggr Operator
-
-  Stage: Stage-8
-    Column Stats Work
+    Stats Work
+      Basic Stats Work:
       Column Stats Desc:
           Columns: key, value
           Column Types: int, string

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/auto_join1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/auto_join1.q.out b/ql/src/test/results/clientpositive/auto_join1.q.out
index 5f4bb74..c2d2473 100644
--- a/ql/src/test/results/clientpositive/auto_join1.q.out
+++ b/ql/src/test/results/clientpositive/auto_join1.q.out
@@ -91,7 +91,8 @@ STAGE PLANS:
               name: default.dest_j1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM src src1 JOIN src src2 ON (src1.key = src2.key)
 INSERT OVERWRITE TABLE dest_j1 SELECT src1.key, src2.value

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/auto_join14.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/auto_join14.q.out b/ql/src/test/results/clientpositive/auto_join14.q.out
index 1dd677c..e67506b 100644
--- a/ql/src/test/results/clientpositive/auto_join14.q.out
+++ b/ql/src/test/results/clientpositive/auto_join14.q.out
@@ -91,7 +91,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM src JOIN srcpart ON src.key = srcpart.key AND srcpart.ds = '2008-04-08' and src.key > 100
 INSERT OVERWRITE TABLE dest1 SELECT src.key, srcpart.value

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/auto_join17.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/auto_join17.q.out b/ql/src/test/results/clientpositive/auto_join17.q.out
index d39c36e..0239cf8 100644
--- a/ql/src/test/results/clientpositive/auto_join17.q.out
+++ b/ql/src/test/results/clientpositive/auto_join17.q.out
@@ -91,7 +91,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM src src1 JOIN src src2 ON (src1.key = src2.key)
 INSERT OVERWRITE TABLE dest1 SELECT src1.*, src2.*

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/auto_join19.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/auto_join19.q.out b/ql/src/test/results/clientpositive/auto_join19.q.out
index 3f70055..e70c514 100644
--- a/ql/src/test/results/clientpositive/auto_join19.q.out
+++ b/ql/src/test/results/clientpositive/auto_join19.q.out
@@ -93,7 +93,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM srcpart src1 JOIN src src2 ON (src1.key = src2.key)
 INSERT OVERWRITE TABLE dest1 SELECT src1.key, src2.value 

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/auto_join19_inclause.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/auto_join19_inclause.q.out b/ql/src/test/results/clientpositive/auto_join19_inclause.q.out
index 3f70055..e70c514 100644
--- a/ql/src/test/results/clientpositive/auto_join19_inclause.q.out
+++ b/ql/src/test/results/clientpositive/auto_join19_inclause.q.out
@@ -93,7 +93,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM srcpart src1 JOIN src src2 ON (src1.key = src2.key)
 INSERT OVERWRITE TABLE dest1 SELECT src1.key, src2.value 

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/auto_join2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/auto_join2.q.out b/ql/src/test/results/clientpositive/auto_join2.q.out
index b17d344..4132caf 100644
--- a/ql/src/test/results/clientpositive/auto_join2.q.out
+++ b/ql/src/test/results/clientpositive/auto_join2.q.out
@@ -117,7 +117,8 @@ STAGE PLANS:
               name: default.dest_j2
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM src src1 JOIN src src2 ON (src1.key = src2.key) JOIN src src3 ON (src1.key + src2.key = src3.key)
 INSERT OVERWRITE TABLE dest_j2 SELECT src1.key, src3.value

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/auto_join26.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/auto_join26.q.out b/ql/src/test/results/clientpositive/auto_join26.q.out
index e6d966f..9b45741 100644
--- a/ql/src/test/results/clientpositive/auto_join26.q.out
+++ b/ql/src/test/results/clientpositive/auto_join26.q.out
@@ -110,7 +110,8 @@ STAGE PLANS:
               name: default.dest_j1
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: INSERT OVERWRITE TABLE dest_j1 
 SELECT  x.key, count(1) FROM src1 x JOIN src y ON (x.key = y.key) group by x.key

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/auto_join3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/auto_join3.q.out b/ql/src/test/results/clientpositive/auto_join3.q.out
index 35e8273..5921da0 100644
--- a/ql/src/test/results/clientpositive/auto_join3.q.out
+++ b/ql/src/test/results/clientpositive/auto_join3.q.out
@@ -113,7 +113,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM src src1 JOIN src src2 ON (src1.key = src2.key) JOIN src src3 ON (src1.key = src3.key)
 INSERT OVERWRITE TABLE dest1 SELECT src1.key, src3.value

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/auto_join4.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/auto_join4.q.out b/ql/src/test/results/clientpositive/auto_join4.q.out
index 3c0ab85..fc34040 100644
--- a/ql/src/test/results/clientpositive/auto_join4.q.out
+++ b/ql/src/test/results/clientpositive/auto_join4.q.out
@@ -113,7 +113,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM (
  FROM 

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/auto_join5.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/auto_join5.q.out b/ql/src/test/results/clientpositive/auto_join5.q.out
index 8da15a5..fe4e24e 100644
--- a/ql/src/test/results/clientpositive/auto_join5.q.out
+++ b/ql/src/test/results/clientpositive/auto_join5.q.out
@@ -113,7 +113,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM (
  FROM 

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/auto_join6.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/auto_join6.q.out b/ql/src/test/results/clientpositive/auto_join6.q.out
index 00bbb1b..594e7a4 100644
--- a/ql/src/test/results/clientpositive/auto_join6.q.out
+++ b/ql/src/test/results/clientpositive/auto_join6.q.out
@@ -110,7 +110,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM (
  FROM 

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/auto_join7.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/auto_join7.q.out b/ql/src/test/results/clientpositive/auto_join7.q.out
index 339ea70..13a5ff5 100644
--- a/ql/src/test/results/clientpositive/auto_join7.q.out
+++ b/ql/src/test/results/clientpositive/auto_join7.q.out
@@ -138,7 +138,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM (
  FROM 

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/auto_join8.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/auto_join8.q.out b/ql/src/test/results/clientpositive/auto_join8.q.out
index 4cc3d59..704ce3c 100644
--- a/ql/src/test/results/clientpositive/auto_join8.q.out
+++ b/ql/src/test/results/clientpositive/auto_join8.q.out
@@ -116,7 +116,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM (
  FROM 

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/auto_join9.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/auto_join9.q.out b/ql/src/test/results/clientpositive/auto_join9.q.out
index d7d7d18..e5022a3 100644
--- a/ql/src/test/results/clientpositive/auto_join9.q.out
+++ b/ql/src/test/results/clientpositive/auto_join9.q.out
@@ -91,7 +91,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM srcpart src1 JOIN src src2 ON (src1.key = src2.key)
 INSERT OVERWRITE TABLE dest1 SELECT src1.key, src2.value where src1.ds = '2008-04-08' and src1.hr = '12'

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/auto_sortmerge_join_13.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/auto_sortmerge_join_13.q.out b/ql/src/test/results/clientpositive/auto_sortmerge_join_13.q.out
index 8c7658c..486d7b3 100644
--- a/ql/src/test/results/clientpositive/auto_sortmerge_join_13.q.out
+++ b/ql/src/test/results/clientpositive/auto_sortmerge_join_13.q.out
@@ -126,7 +126,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-1
     Move Operator
@@ -139,7 +140,8 @@ STAGE PLANS:
               name: default.dest2
 
   Stage: Stage-4
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: from (
   SELECT a.key key1, a.value value1, b.key key2, b.value value2 
@@ -303,7 +305,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-1
     Move Operator
@@ -316,7 +319,8 @@ STAGE PLANS:
               name: default.dest2
 
   Stage: Stage-4
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: from (
   SELECT a.key key1, a.value value1, b.key key2, b.value value2 
@@ -480,7 +484,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-1
     Move Operator
@@ -493,7 +498,8 @@ STAGE PLANS:
               name: default.dest2
 
   Stage: Stage-4
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: from (
   SELECT a.key key1, a.value value1, b.key key2, b.value value2 

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/avro_decimal.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/avro_decimal.q.out b/ql/src/test/results/clientpositive/avro_decimal.q.out
index b093a22..4d69221 100644
--- a/ql/src/test/results/clientpositive/avro_decimal.q.out
+++ b/ql/src/test/results/clientpositive/avro_decimal.q.out
@@ -21,10 +21,12 @@ POSTHOOK: Output: default@dec
 PREHOOK: query: ANALYZE TABLE `dec` COMPUTE STATISTICS FOR COLUMNS value
 PREHOOK: type: QUERY
 PREHOOK: Input: default@dec
+PREHOOK: Output: default@dec
 #### A masked pattern was here ####
 POSTHOOK: query: ANALYZE TABLE `dec` COMPUTE STATISTICS FOR COLUMNS value
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@dec
+POSTHOOK: Output: default@dec
 #### A masked pattern was here ####
 PREHOOK: query: DESC FORMATTED `dec` value
 PREHOOK: type: DESCTABLE
@@ -44,7 +46,7 @@ num_trues
 num_falses          	                    	 	 	 	 	 	 	 	 	 	 
 bitVector           	HL                  	 	 	 	 	 	 	 	 	 	 
 comment             	from deserializer   	 	 	 	 	 	 	 	 	 	 
-COLUMN_STATS_ACCURATE	{\"COLUMN_STATS\":{\"value\":\"true\"}}	 	 	 	 	 	 	 	 	 	 
+COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"value\":\"true\"}}	 	 	 	 	 	 	 	 	 	 
 PREHOOK: query: DROP TABLE IF EXISTS avro_dec
 PREHOOK: type: DROPTABLE
 POSTHOOK: query: DROP TABLE IF EXISTS avro_dec

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/avro_decimal_native.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/avro_decimal_native.q.out b/ql/src/test/results/clientpositive/avro_decimal_native.q.out
index abf5937..e4b1739 100644
--- a/ql/src/test/results/clientpositive/avro_decimal_native.q.out
+++ b/ql/src/test/results/clientpositive/avro_decimal_native.q.out
@@ -25,10 +25,12 @@ POSTHOOK: Output: default@dec
 PREHOOK: query: ANALYZE TABLE `dec` COMPUTE STATISTICS FOR COLUMNS value
 PREHOOK: type: QUERY
 PREHOOK: Input: default@dec
+PREHOOK: Output: default@dec
 #### A masked pattern was here ####
 POSTHOOK: query: ANALYZE TABLE `dec` COMPUTE STATISTICS FOR COLUMNS value
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@dec
+POSTHOOK: Output: default@dec
 #### A masked pattern was here ####
 PREHOOK: query: DESC FORMATTED `dec` value
 PREHOOK: type: DESCTABLE
@@ -48,7 +50,7 @@ num_trues
 num_falses          	                    	 	 	 	 	 	 	 	 	 	 
 bitVector           	HL                  	 	 	 	 	 	 	 	 	 	 
 comment             	from deserializer   	 	 	 	 	 	 	 	 	 	 
-COLUMN_STATS_ACCURATE	{\"COLUMN_STATS\":{\"value\":\"true\"}}	 	 	 	 	 	 	 	 	 	 
+COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"value\":\"true\"}}	 	 	 	 	 	 	 	 	 	 
 PREHOOK: query: DROP TABLE IF EXISTS avro_dec
 PREHOOK: type: DROPTABLE
 POSTHOOK: query: DROP TABLE IF EXISTS avro_dec

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/basicstat_partval.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/basicstat_partval.q.out b/ql/src/test/results/clientpositive/basicstat_partval.q.out
new file mode 100644
index 0000000..fa5c5ba
--- /dev/null
+++ b/ql/src/test/results/clientpositive/basicstat_partval.q.out
@@ -0,0 +1,132 @@
+PREHOOK: query: CREATE TABLE p1(i int) partitioned by (p string)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@p1
+POSTHOOK: query: CREATE TABLE p1(i int) partitioned by (p string)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@p1
+PREHOOK: query: insert into p1 partition(p='a') values (1)
+PREHOOK: type: QUERY
+PREHOOK: Output: default@p1@p=a
+POSTHOOK: query: insert into p1 partition(p='a') values (1)
+POSTHOOK: type: QUERY
+POSTHOOK: Output: default@p1@p=a
+POSTHOOK: Lineage: p1 PARTITION(p=a).i EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
+PREHOOK: query: insert into p1 partition(p='A') values (2),(3)
+PREHOOK: type: QUERY
+PREHOOK: Output: default@p1@p=A
+POSTHOOK: query: insert into p1 partition(p='A') values (2),(3)
+POSTHOOK: type: QUERY
+POSTHOOK: Output: default@p1@p=A
+POSTHOOK: Lineage: p1 PARTITION(p=A).i EXPRESSION [(values__tmp__table__2)values__tmp__table__2.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
+PREHOOK: query: describe formatted p1
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@p1
+POSTHOOK: query: describe formatted p1
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@p1
+# col_name            	data_type           	comment             
+i                   	int                 	                    
+	 	 
+# Partition Information	 	 
+# col_name            	data_type           	comment             
+p                   	string              	                    
+	 	 
+# Detailed Table Information	 	 
+Database:           	default             	 
+#### A masked pattern was here ####
+Retention:          	0                   	 
+#### A masked pattern was here ####
+Table Type:         	MANAGED_TABLE       	 
+Table Parameters:	 	 
+	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\"}
+	numFiles            	2                   
+	numPartitions       	2                   
+	numRows             	3                   
+	rawDataSize         	3                   
+	totalSize           	6                   
+#### A masked pattern was here ####
+	 	 
+# Storage Information	 	 
+SerDe Library:      	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe	 
+InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
+OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
+Compressed:         	No                  	 
+Num Buckets:        	-1                  	 
+Bucket Columns:     	[]                  	 
+Sort Columns:       	[]                  	 
+Storage Desc Params:	 	 
+	serialization.format	1                   
+PREHOOK: query: describe formatted p1 partition(p='a')
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@p1
+POSTHOOK: query: describe formatted p1 partition(p='a')
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@p1
+# col_name            	data_type           	comment             
+i                   	int                 	                    
+	 	 
+# Partition Information	 	 
+# col_name            	data_type           	comment             
+p                   	string              	                    
+	 	 
+# Detailed Partition Information	 	 
+Partition Value:    	[a]                 	 
+Database:           	default             	 
+Table:              	p1                  	 
+#### A masked pattern was here ####
+Partition Parameters:	 	 
+	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\"}
+	numFiles            	1                   
+	numRows             	1                   
+	rawDataSize         	1                   
+	totalSize           	2                   
+#### A masked pattern was here ####
+	 	 
+# Storage Information	 	 
+SerDe Library:      	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe	 
+InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
+OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
+Compressed:         	No                  	 
+Num Buckets:        	-1                  	 
+Bucket Columns:     	[]                  	 
+Sort Columns:       	[]                  	 
+Storage Desc Params:	 	 
+	serialization.format	1                   
+PREHOOK: query: describe formatted p1 partition(p='A')
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@p1
+POSTHOOK: query: describe formatted p1 partition(p='A')
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@p1
+# col_name            	data_type           	comment             
+i                   	int                 	                    
+	 	 
+# Partition Information	 	 
+# col_name            	data_type           	comment             
+p                   	string              	                    
+	 	 
+# Detailed Partition Information	 	 
+Partition Value:    	[A]                 	 
+Database:           	default             	 
+Table:              	p1                  	 
+#### A masked pattern was here ####
+Partition Parameters:	 	 
+	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\"}
+	numFiles            	1                   
+	numRows             	2                   
+	rawDataSize         	2                   
+	totalSize           	4                   
+#### A masked pattern was here ####
+	 	 
+# Storage Information	 	 
+SerDe Library:      	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe	 
+InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
+OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
+Compressed:         	No                  	 
+Num Buckets:        	-1                  	 
+Bucket Columns:     	[]                  	 
+Sort Columns:       	[]                  	 
+Storage Desc Params:	 	 
+	serialization.format	1                   

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/beeline/colstats_all_nulls.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/beeline/colstats_all_nulls.q.out b/ql/src/test/results/clientpositive/beeline/colstats_all_nulls.q.out
index 7e26ae0..49af8f6 100644
--- a/ql/src/test/results/clientpositive/beeline/colstats_all_nulls.q.out
+++ b/ql/src/test/results/clientpositive/beeline/colstats_all_nulls.q.out
@@ -31,10 +31,12 @@ PREHOOK: query: analyze table all_nulls compute statistics for columns
 PREHOOK: type: QUERY
 PREHOOK: Input: default@all_nulls
 #### A masked pattern was here ####
+PREHOOK: Output: default@all_nulls
 POSTHOOK: query: analyze table all_nulls compute statistics for columns
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@all_nulls
 #### A masked pattern was here ####
+POSTHOOK: Output: default@all_nulls
 PREHOOK: query: describe formatted all_nulls a
 PREHOOK: type: DESCTABLE
 PREHOOK: Input: default@all_nulls

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/beeline/smb_mapjoin_1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/beeline/smb_mapjoin_1.q.out b/ql/src/test/results/clientpositive/beeline/smb_mapjoin_1.q.out
index b3f36ad..ea314b3 100644
--- a/ql/src/test/results/clientpositive/beeline/smb_mapjoin_1.q.out
+++ b/ql/src/test/results/clientpositive/beeline/smb_mapjoin_1.q.out
@@ -46,6 +46,51 @@ POSTHOOK: query: load data local inpath '../../data/files/smbbucket_3.rc' overwr
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
 POSTHOOK: Output: default@smb_bucket_3
+PREHOOK: query: desc formatted smb_bucket_1
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@smb_bucket_1
+POSTHOOK: query: desc formatted smb_bucket_1
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@smb_bucket_1
+	NULL	NULL
+	NULL	NULL
+	SORTBUCKETCOLSPREFIX	TRUE                
+	numFiles            	1                   
+	numRows             	0                   
+	rawDataSize         	0                   
+	serialization.format	1                   
+	totalSize           	208                 
+#### A masked pattern was here ####
+# Detailed Table Information	NULL	NULL
+# Storage Information	NULL	NULL
+# col_name	data_type	comment
+Bucket Columns:     	[key]               	NULL
+Compressed:         	No                  	NULL
+#### A masked pattern was here ####
+Database:           	test_db_smb_mapjoin_1	NULL
+InputFormat:        	org.apache.hadoop.hive.ql.io.RCFileInputFormat	NULL
+LastAccessTime:     	UNKNOWN             	NULL
+#### A masked pattern was here ####
+Num Buckets:        	1                   	NULL
+OutputFormat:       	org.apache.hadoop.hive.ql.io.RCFileOutputFormat	NULL
+#### A masked pattern was here ####
+Retention:          	0                   	NULL
+SerDe Library:      	org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe	NULL
+Sort Columns:       	[Order(col:key, order:1)]	NULL
+Storage Desc Params:	NULL	NULL
+Table Parameters:	NULL	NULL
+Table Type:         	MANAGED_TABLE       	NULL
+key	int	
+value	string	
+PREHOOK: query: select count(*) from smb_bucket_1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@smb_bucket_1
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from smb_bucket_1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@smb_bucket_1
+#### A masked pattern was here ####
+5
 PREHOOK: query: explain
 select /*+mapjoin(a)*/ * from smb_bucket_1 a join smb_bucket_2 b on a.key = b.key
 PREHOOK: type: QUERY

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/beeline/smb_mapjoin_11.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/beeline/smb_mapjoin_11.q.out b/ql/src/test/results/clientpositive/beeline/smb_mapjoin_11.q.out
index b53e670..b76e7a8 100644
--- a/ql/src/test/results/clientpositive/beeline/smb_mapjoin_11.q.out
+++ b/ql/src/test/results/clientpositive/beeline/smb_mapjoin_11.q.out
@@ -202,7 +202,8 @@ STAGE PLANS:
               name: default.test_table3
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
 PREHOOK: query: INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') SELECT /*+ MAPJOIN(b) */ a.key, b.value FROM test_table1 a JOIN test_table2 b ON a.key = b.key AND a.ds = '1' AND b.ds = '1'

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/beeline/smb_mapjoin_12.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/beeline/smb_mapjoin_12.q.out b/ql/src/test/results/clientpositive/beeline/smb_mapjoin_12.q.out
index 9af4683..6def3de 100644
--- a/ql/src/test/results/clientpositive/beeline/smb_mapjoin_12.q.out
+++ b/ql/src/test/results/clientpositive/beeline/smb_mapjoin_12.q.out
@@ -204,7 +204,8 @@ STAGE PLANS:
               name: default.test_table3
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
 PREHOOK: query: INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') SELECT /*+ MAPJOIN(b) */ a.key, b.value FROM test_table1 a JOIN test_table2 b ON a.key = b.key AND a.ds = '1' AND b.ds >= '1'
@@ -409,7 +410,8 @@ STAGE PLANS:
               name: default.test_table3
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
 PREHOOK: query: INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '2') 

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/beeline/smb_mapjoin_7.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/beeline/smb_mapjoin_7.q.out b/ql/src/test/results/clientpositive/beeline/smb_mapjoin_7.q.out
index 82f5804..a2a8660 100644
--- a/ql/src/test/results/clientpositive/beeline/smb_mapjoin_7.q.out
+++ b/ql/src/test/results/clientpositive/beeline/smb_mapjoin_7.q.out
@@ -669,7 +669,8 @@ STAGE PLANS:
               name: default.smb_join_results
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-4
     Map Reduce

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/binary_output_format.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/binary_output_format.q.out b/ql/src/test/results/clientpositive/binary_output_format.q.out
index ddb6adf..ce85351 100644
--- a/ql/src/test/results/clientpositive/binary_output_format.q.out
+++ b/ql/src/test/results/clientpositive/binary_output_format.q.out
@@ -208,7 +208,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
   Stage: Stage-3

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/bucket1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/bucket1.q.out b/ql/src/test/results/clientpositive/bucket1.q.out
index 1d20473..c59a755 100644
--- a/ql/src/test/results/clientpositive/bucket1.q.out
+++ b/ql/src/test/results/clientpositive/bucket1.q.out
@@ -160,7 +160,8 @@ STAGE PLANS:
               name: default.bucket1_1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
 PREHOOK: query: insert overwrite table bucket1_1

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/bucket2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/bucket2.q.out b/ql/src/test/results/clientpositive/bucket2.q.out
index 48ccafb..4bee31b 100644
--- a/ql/src/test/results/clientpositive/bucket2.q.out
+++ b/ql/src/test/results/clientpositive/bucket2.q.out
@@ -160,7 +160,8 @@ STAGE PLANS:
               name: default.bucket2_1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
 PREHOOK: query: insert overwrite table bucket2_1

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/bucket3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/bucket3.q.out b/ql/src/test/results/clientpositive/bucket3.q.out
index b1173e7..49f12f7 100644
--- a/ql/src/test/results/clientpositive/bucket3.q.out
+++ b/ql/src/test/results/clientpositive/bucket3.q.out
@@ -157,7 +157,8 @@ STAGE PLANS:
               name: default.bucket3_1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
 PREHOOK: query: insert overwrite table bucket3_1 partition (ds='1')

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/bucket_map_join_spark1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/bucket_map_join_spark1.q.out b/ql/src/test/results/clientpositive/bucket_map_join_spark1.q.out
index 70cd53c..f210c5a 100644
--- a/ql/src/test/results/clientpositive/bucket_map_join_spark1.q.out
+++ b/ql/src/test/results/clientpositive/bucket_map_join_spark1.q.out
@@ -390,7 +390,8 @@ STAGE PLANS:
               name: default.bucketmapjoin_tmp_result
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
 PREHOOK: query: insert overwrite table bucketmapjoin_tmp_result
@@ -724,7 +725,8 @@ STAGE PLANS:
               name: default.bucketmapjoin_tmp_result
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
 PREHOOK: query: insert overwrite table bucketmapjoin_tmp_result

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/bucket_map_join_spark2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/bucket_map_join_spark2.q.out b/ql/src/test/results/clientpositive/bucket_map_join_spark2.q.out
index d0c3a1a..c6d6079 100644
--- a/ql/src/test/results/clientpositive/bucket_map_join_spark2.q.out
+++ b/ql/src/test/results/clientpositive/bucket_map_join_spark2.q.out
@@ -374,7 +374,8 @@ STAGE PLANS:
               name: default.bucketmapjoin_tmp_result
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
 PREHOOK: query: insert overwrite table bucketmapjoin_tmp_result
@@ -708,7 +709,8 @@ STAGE PLANS:
               name: default.bucketmapjoin_tmp_result
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
 PREHOOK: query: insert overwrite table bucketmapjoin_tmp_result

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/bucket_map_join_spark3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/bucket_map_join_spark3.q.out b/ql/src/test/results/clientpositive/bucket_map_join_spark3.q.out
index eed4a5a..52c17cc 100644
--- a/ql/src/test/results/clientpositive/bucket_map_join_spark3.q.out
+++ b/ql/src/test/results/clientpositive/bucket_map_join_spark3.q.out
@@ -374,7 +374,8 @@ STAGE PLANS:
               name: default.bucketmapjoin_tmp_result
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
 PREHOOK: query: insert overwrite table bucketmapjoin_tmp_result
@@ -708,7 +709,8 @@ STAGE PLANS:
               name: default.bucketmapjoin_tmp_result
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
 PREHOOK: query: insert overwrite table bucketmapjoin_tmp_result

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/bucketmapjoin5.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/bucketmapjoin5.q.out b/ql/src/test/results/clientpositive/bucketmapjoin5.q.out
index 5657cbf..1096912 100644
--- a/ql/src/test/results/clientpositive/bucketmapjoin5.q.out
+++ b/ql/src/test/results/clientpositive/bucketmapjoin5.q.out
@@ -415,7 +415,8 @@ STAGE PLANS:
               name: default.bucketmapjoin_tmp_result
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
   Stage: Stage-4
@@ -956,7 +957,8 @@ STAGE PLANS:
               name: default.bucketmapjoin_tmp_result
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
   Stage: Stage-4

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/bucketmapjoin_negative.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/bucketmapjoin_negative.q.out b/ql/src/test/results/clientpositive/bucketmapjoin_negative.q.out
index 2da33c6..15286ed 100644
--- a/ql/src/test/results/clientpositive/bucketmapjoin_negative.q.out
+++ b/ql/src/test/results/clientpositive/bucketmapjoin_negative.q.out
@@ -304,7 +304,8 @@ STAGE PLANS:
               name: default.bucketmapjoin_tmp_result
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
   Stage: Stage-3

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/bucketmapjoin_negative2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/bucketmapjoin_negative2.q.out b/ql/src/test/results/clientpositive/bucketmapjoin_negative2.q.out
index bf1c011..3c171d6 100644
--- a/ql/src/test/results/clientpositive/bucketmapjoin_negative2.q.out
+++ b/ql/src/test/results/clientpositive/bucketmapjoin_negative2.q.out
@@ -367,7 +367,8 @@ STAGE PLANS:
               name: default.bucketmapjoin_tmp_result
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
   Stage: Stage-3

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/bucketsortoptimize_insert_1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/bucketsortoptimize_insert_1.q.out b/ql/src/test/results/clientpositive/bucketsortoptimize_insert_1.q.out
index 165f0dc..2d6bd6f 100644
--- a/ql/src/test/results/clientpositive/bucketsortoptimize_insert_1.q.out
+++ b/ql/src/test/results/clientpositive/bucketsortoptimize_insert_1.q.out
@@ -82,7 +82,8 @@ STAGE PLANS:
               name: default.test_table2
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '1')
 SELECT x.key, x.value from 
@@ -187,7 +188,8 @@ STAGE PLANS:
               name: default.test_table2
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '1')
 SELECT * from 
@@ -292,7 +294,8 @@ STAGE PLANS:
               name: default.test_table2
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: EXPLAIN
 INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '1')
@@ -357,7 +360,8 @@ STAGE PLANS:
               name: default.test_table2
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: EXPLAIN
 INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '1')
@@ -411,5 +415,6 @@ STAGE PLANS:
               name: default.test_table2
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 


http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java
index dbf4b8d..d86162b 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java
@@ -33,7 +33,6 @@ import java.util.Map.Entry;
 import java.util.Properties;
 import java.util.Set;
 
-import com.google.common.annotations.VisibleForTesting;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hive.common.BlobStorageUtils;
@@ -71,7 +70,6 @@ import org.apache.hadoop.hive.ql.io.merge.MergeFileWork;
 import org.apache.hadoop.hive.ql.io.orc.OrcFileStripeMergeInputFormat;
 import org.apache.hadoop.hive.ql.io.orc.OrcInputFormat;
 import org.apache.hadoop.hive.ql.io.rcfile.merge.RCFileBlockMergeInputFormat;
-import org.apache.hadoop.hive.ql.lockmgr.HiveTxnManager;
 import org.apache.hadoop.hive.ql.metadata.Hive;
 import org.apache.hadoop.hive.ql.metadata.HiveException;
 import org.apache.hadoop.hive.ql.metadata.Partition;
@@ -85,16 +83,17 @@ import org.apache.hadoop.hive.ql.parse.ParseContext;
 import org.apache.hadoop.hive.ql.parse.PrunedPartitionList;
 import org.apache.hadoop.hive.ql.parse.SemanticException;
 import org.apache.hadoop.hive.ql.plan.BaseWork;
+import org.apache.hadoop.hive.ql.plan.BasicStatsWork;
 import org.apache.hadoop.hive.ql.plan.ConditionalResolverMergeFiles;
 import org.apache.hadoop.hive.ql.plan.ConditionalResolverMergeFiles.ConditionalResolverMergeFilesCtx;
 import org.apache.hadoop.hive.ql.plan.ConditionalWork;
+import org.apache.hadoop.hive.ql.plan.DependencyCollectionWork;
 import org.apache.hadoop.hive.ql.plan.DynamicPartitionCtx;
 import org.apache.hadoop.hive.ql.plan.ExprNodeDesc;
 import org.apache.hadoop.hive.ql.plan.FetchWork;
 import org.apache.hadoop.hive.ql.plan.FileMergeDesc;
 import org.apache.hadoop.hive.ql.plan.FileSinkDesc;
 import org.apache.hadoop.hive.ql.plan.FilterDesc.SampleDesc;
-import org.apache.hadoop.hive.ql.plan.DependencyCollectionWork;
 import org.apache.hadoop.hive.ql.plan.LoadFileDesc;
 import org.apache.hadoop.hive.ql.plan.LoadTableDesc;
 import org.apache.hadoop.hive.ql.plan.MapWork;
@@ -122,6 +121,7 @@ import org.apache.hadoop.mapred.InputFormat;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
 import com.google.common.collect.Interner;
 
@@ -496,6 +496,10 @@ public final class GenMapRedUtils {
 
     Path tblDir = null;
     plan.setNameToSplitSample(parseCtx.getNameToSplitSample());
+    // we also collect table stats while collecting column stats.
+    if (parseCtx.getAnalyzeRewrite() != null) {
+      plan.setGatheringStats(true);
+    }
 
     if (partsList == null) {
       try {
@@ -1480,17 +1484,34 @@ public final class GenMapRedUtils {
       Task<? extends Serializable> currTask, HiveConf hconf) {
 
     MoveWork mvWork = mvTask.getWork();
-    StatsWork statsWork = null;
+    BasicStatsWork statsWork = null;
+    Table table = null;
+    boolean truncate = false;
     if (mvWork.getLoadTableWork() != null) {
-      statsWork = new StatsWork(mvWork.getLoadTableWork());
+      statsWork = new BasicStatsWork(mvWork.getLoadTableWork());
+      String tableName = mvWork.getLoadTableWork().getTable().getTableName();
+      truncate = mvWork.getLoadTableWork().getReplace();
+      try {
+        table = Hive.get().getTable(SessionState.get().getCurrentDatabase(), tableName);
+      } catch (HiveException e) {
+        throw new RuntimeException("unexpected; table should be present already..: " + tableName, e);
+      }
     } else if (mvWork.getLoadFileWork() != null) {
-      statsWork = new StatsWork(mvWork.getLoadFileWork());
+      statsWork = new BasicStatsWork(mvWork.getLoadFileWork());
+
+      truncate = true;
+      if (mvWork.getLoadFileWork().getCtasCreateTableDesc() == null) {
+        throw new RuntimeException("unexpected; this should be a CTAS - however no desc present");
+      }
+      try {
+        table = mvWork.getLoadFileWork().getCtasCreateTableDesc().toTable(hconf);
+      } catch (HiveException e) {
+        LOG.debug("can't pre-create table", e);
+        table = null;
+      }
     }
     assert statsWork != null : "Error when generating StatsTask";
 
-    statsWork.setSourceTask(currTask);
-    statsWork.setStatsReliable(hconf.getBoolVar(ConfVars.HIVE_STATS_RELIABLE));
-    statsWork.setStatsTmpDir(nd.getConf().getStatsTmpDir());
     if (currTask.getWork() instanceof MapredWork) {
       MapredWork mrWork = (MapredWork) currTask.getWork();
       mrWork.getMapWork().setGatheringStats(true);
@@ -1509,10 +1530,13 @@ public final class GenMapRedUtils {
       }
     }
 
-    // AggKey in StatsWork is used for stats aggregation while StatsAggPrefix
-    // in FileSinkDesc is used for stats publishing. They should be consistent.
-    statsWork.setAggKey(nd.getConf().getStatsAggPrefix());
-    Task<? extends Serializable> statsTask = TaskFactory.get(statsWork, hconf);
+    StatsWork columnStatsWork = new StatsWork(table, statsWork, hconf);
+    columnStatsWork.collectStatsFromAggregator(nd.getConf());
+
+    columnStatsWork.truncateExisting(truncate);
+
+    columnStatsWork.setSourceTask(currTask);
+    Task<? extends Serializable> statsTask = TaskFactory.get(columnStatsWork, hconf);
 
     // subscribe feeds from the MoveTask so that MoveTask can forward the list
     // of dynamic partition list to the StatsTask
@@ -1579,7 +1603,7 @@ public final class GenMapRedUtils {
    */
   public static MapWork createMergeTask(FileSinkDesc fsInputDesc, Path finalName,
       boolean hasDynamicPartitions, CompilationOpContext ctx) throws SemanticException {
-    
+
     Path inputDir = fsInputDesc.getMergeInputDirName();
 
     TableDesc tblDesc = fsInputDesc.getTableInfo();
@@ -1860,7 +1884,9 @@ public final class GenMapRedUtils {
   public static boolean isMergeRequired(List<Task<MoveWork>> mvTasks, HiveConf hconf,
       FileSinkOperator fsOp, Task<? extends Serializable> currTask, boolean isInsertTable) {
     // Has the user enabled merging of files for map-only jobs or for all jobs
-    if (mvTasks == null  || mvTasks.isEmpty()) return false;
+    if (mvTasks == null  || mvTasks.isEmpty()) {
+      return false;
+    }
 
     // no need of merging if the move is to a local file system
     // We are looking based on the original FSOP, so use the original path as is.
@@ -1878,7 +1904,9 @@ public final class GenMapRedUtils {
       }
     }
 
-    if (mvTask == null || mvTask.isLocal() || !fsOp.getConf().canBeMerged()) return false;
+    if (mvTask == null || mvTask.isLocal() || !fsOp.getConf().canBeMerged()) {
+      return false;
+    }
 
     if (currTask.getWork() instanceof TezWork) {
       // tez blurs the boundary between map and reduce, thus it has it's own config

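Condensing the hunk above: GenMapRedUtils now resolves the target Table up front (a metastore lookup for a load into an existing table, or a table pre-created from the CTAS descriptor), wraps the BasicStatsWork in the new unified StatsWork, and replaces the old setAggKey()/setStatsTmpDir() plumbing with collectStatsFromAggregator() on the FileSinkDesc. A sketch under the assumption that the local variables (mvWork, table, truncate, nd, currTask, hconf) are exactly as in the patch:

import java.io.Serializable;

import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.ql.exec.FileSinkOperator;
import org.apache.hadoop.hive.ql.exec.Task;
import org.apache.hadoop.hive.ql.exec.TaskFactory;
import org.apache.hadoop.hive.ql.metadata.Table;
import org.apache.hadoop.hive.ql.plan.BasicStatsWork;
import org.apache.hadoop.hive.ql.plan.MoveWork;
import org.apache.hadoop.hive.ql.plan.StatsWork;

final class UnifiedStatsTaskSketch {
  static Task<? extends Serializable> statsTask(MoveWork mvWork, Table table, boolean truncate,
      FileSinkOperator nd, Task<? extends Serializable> currTask, HiveConf hconf) {
    BasicStatsWork statsWork = mvWork.getLoadTableWork() != null
        ? new BasicStatsWork(mvWork.getLoadTableWork())  // load into an existing table
        : new BasicStatsWork(mvWork.getLoadFileWork());  // CTAS: table pre-created from its desc
    StatsWork columnStatsWork = new StatsWork(table, statsWork, hconf);
    columnStatsWork.collectStatsFromAggregator(nd.getConf()); // replaces setAggKey()/setStatsTmpDir()
    columnStatsWork.truncateExisting(truncate);               // overwrite semantics drop old stats
    columnStatsWork.setSourceTask(currTask);
    return TaskFactory.get(columnStatsWork, hconf);
  }
}
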
http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/java/org/apache/hadoop/hive/ql/optimizer/QueryPlanPostProcessor.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/QueryPlanPostProcessor.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/QueryPlanPostProcessor.java
index 91c6c00..624eb6c 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/QueryPlanPostProcessor.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/QueryPlanPostProcessor.java
@@ -17,6 +17,9 @@
  */
 package org.apache.hadoop.hive.ql.optimizer;
 
+import java.util.List;
+import java.util.Set;
+
 import org.apache.hadoop.hive.ql.exec.FileSinkOperator;
 import org.apache.hadoop.hive.ql.exec.Operator;
 import org.apache.hadoop.hive.ql.exec.OperatorUtils;
@@ -30,8 +33,9 @@ import org.apache.hadoop.hive.ql.parse.GenTezWork;
 import org.apache.hadoop.hive.ql.parse.spark.GenSparkWork;
 import org.apache.hadoop.hive.ql.plan.ArchiveWork;
 import org.apache.hadoop.hive.ql.plan.BaseWork;
+import org.apache.hadoop.hive.ql.plan.BasicStatsNoJobWork;
+import org.apache.hadoop.hive.ql.plan.BasicStatsWork;
 import org.apache.hadoop.hive.ql.plan.ColumnStatsUpdateWork;
-import org.apache.hadoop.hive.ql.plan.ColumnStatsWork;
 import org.apache.hadoop.hive.ql.plan.ConditionalWork;
 import org.apache.hadoop.hive.ql.plan.CopyWork;
 import org.apache.hadoop.hive.ql.plan.DDLWork;
@@ -45,15 +49,11 @@ import org.apache.hadoop.hive.ql.plan.MapredLocalWork;
 import org.apache.hadoop.hive.ql.plan.MapredWork;
 import org.apache.hadoop.hive.ql.plan.MoveWork;
 import org.apache.hadoop.hive.ql.plan.SparkWork;
-import org.apache.hadoop.hive.ql.plan.StatsNoJobWork;
 import org.apache.hadoop.hive.ql.plan.StatsWork;
 import org.apache.hadoop.hive.ql.plan.TezWork;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import java.util.List;
-import java.util.Set;
-
 /**
  * Finds Acid FileSinkDesc objects which can be created in the physical (disconnected) plan, e.g.
  * {@link org.apache.hadoop.hive.ql.parse.GenTezUtils#removeUnionOperators(GenTezProcContext, BaseWork, int)}
@@ -92,7 +92,7 @@ public class QueryPlanPostProcessor {
       }
       else if(work instanceof MapredLocalWork) {
         //I don't think this can have any FileSinkOperatorS - more future proofing
-        Set<FileSinkOperator> fileSinkOperatorSet = OperatorUtils.findOperators(((MapredLocalWork)work).getAliasToWork().values(), FileSinkOperator.class);
+        Set<FileSinkOperator> fileSinkOperatorSet = OperatorUtils.findOperators(((MapredLocalWork) work).getAliasToWork().values(), FileSinkOperator.class);
         for(FileSinkOperator fsop : fileSinkOperatorSet) {
           collectFileSinkDescs(fsop, acidSinks);
         }
@@ -100,42 +100,6 @@ public class QueryPlanPostProcessor {
       else if(work instanceof ExplainWork) {
         new QueryPlanPostProcessor(((ExplainWork)work).getRootTasks(), acidSinks, executionId);
       }
-      /*
-      ekoifman:~ ekoifman$ cd dev/hiverwgit/ql/src/java/org/apache/
-ekoifman:apache ekoifman$ find . -name *Work.java
-./hadoop/hive/ql/exec/repl/bootstrap/ReplLoadWork.java
-./hadoop/hive/ql/exec/repl/ReplDumpWork.java
-./hadoop/hive/ql/exec/repl/ReplStateLogWork.java
-./hadoop/hive/ql/index/IndexMetadataChangeWork.java
-./hadoop/hive/ql/io/merge/MergeFileWork.java - extends MapWork
-./hadoop/hive/ql/io/rcfile/truncate/ColumnTruncateWork.java - extends MapWork
-./hadoop/hive/ql/parse/GenTezWork.java
-./hadoop/hive/ql/parse/spark/GenSparkWork.java
-./hadoop/hive/ql/plan/ArchiveWork.java
-./hadoop/hive/ql/plan/BaseWork.java
-./hadoop/hive/ql/plan/ColumnStatsUpdateWork.java
-./hadoop/hive/ql/plan/ColumnStatsWork.java
-./hadoop/hive/ql/plan/ConditionalWork.java
-./hadoop/hive/ql/plan/CopyWork.java
-./hadoop/hive/ql/plan/DDLWork.java
-./hadoop/hive/ql/plan/DependencyCollectionWork.java
-./hadoop/hive/ql/plan/ExplainSQRewriteWork.java
-./hadoop/hive/ql/plan/ExplainWork.java
-./hadoop/hive/ql/plan/FetchWork.java
-./hadoop/hive/ql/plan/FunctionWork.java
-./hadoop/hive/ql/plan/MapredLocalWork.java
-./hadoop/hive/ql/plan/MapredWork.java
-./hadoop/hive/ql/plan/MapWork.java - extends BaseWork
-./hadoop/hive/ql/plan/MergeJoinWork.java - extends BaseWork
-./hadoop/hive/ql/plan/MoveWork.java
-./hadoop/hive/ql/plan/ReduceWork.java
-./hadoop/hive/ql/plan/ReplCopyWork.java - extends CopyWork
-./hadoop/hive/ql/plan/SparkWork.java
-./hadoop/hive/ql/plan/StatsNoJobWork.java
-./hadoop/hive/ql/plan/StatsWork.java
-./hadoop/hive/ql/plan/TezWork.java
-./hadoop/hive/ql/plan/UnionWork.java - extends BaseWork
-      */
       else if(work instanceof ReplLoadWork ||
         work instanceof ReplStateLogWork ||
         work instanceof IndexMetadataChangeWork ||
@@ -143,7 +107,7 @@ ekoifman:apache ekoifman$ find . -name *Work.java
         work instanceof GenSparkWork ||
         work instanceof ArchiveWork ||
         work instanceof ColumnStatsUpdateWork ||
-        work instanceof ColumnStatsWork ||
+        work instanceof BasicStatsWork ||
         work instanceof ConditionalWork ||
         work instanceof CopyWork ||
         work instanceof DDLWork ||
@@ -152,7 +116,7 @@ ekoifman:apache ekoifman$ find . -name *Work.java
         work instanceof FetchWork ||
         work instanceof FunctionWork ||
         work instanceof MoveWork ||
-        work instanceof StatsNoJobWork ||
+        work instanceof BasicStatsNoJobWork ||
         work instanceof StatsWork) {
         LOG.debug("Found " + work.getClass().getName() + " - no FileSinkOperation can be present.  executionId=" + executionId);
       }

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/MemoryDecider.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/MemoryDecider.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/MemoryDecider.java
index 3a20cfe..bf7a644 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/MemoryDecider.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/MemoryDecider.java
@@ -22,11 +22,9 @@ import java.util.ArrayList;
 import java.util.Comparator;
 import java.util.Iterator;
 import java.util.HashMap;
-import java.util.HashSet;
 import java.util.Map;
 import java.util.LinkedHashMap;
 import java.util.LinkedHashSet;
-import java.util.List;
 import java.util.Set;
 import java.util.SortedSet;
 import java.util.Stack;
@@ -35,12 +33,9 @@ import java.util.TreeSet;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.ql.exec.MapJoinOperator;
-import org.apache.hadoop.hive.ql.exec.ReduceSinkOperator;
-import org.apache.hadoop.hive.ql.exec.Operator;
 import org.apache.hadoop.hive.ql.exec.StatsTask;
+import org.apache.hadoop.hive.ql.exec.MapJoinOperator;
 import org.apache.hadoop.hive.ql.exec.Task;
-import org.apache.hadoop.hive.ql.exec.tez.DagUtils;
 import org.apache.hadoop.hive.ql.exec.tez.TezTask;
 import org.apache.hadoop.hive.ql.lib.DefaultGraphWalker;
 import org.apache.hadoop.hive.ql.lib.DefaultRuleDispatcher;
@@ -58,8 +53,6 @@ import org.apache.hadoop.hive.ql.plan.BaseWork;
 import org.apache.hadoop.hive.ql.plan.MapWork;
 import org.apache.hadoop.hive.ql.plan.MergeJoinWork;
 import org.apache.hadoop.hive.ql.plan.ReduceWork;
-import org.apache.hadoop.hive.ql.plan.TezEdgeProperty;
-import org.apache.hadoop.hive.ql.plan.TezEdgeProperty.EdgeType;
 import org.apache.hadoop.hive.ql.plan.TezWork;
 
 /**
@@ -174,6 +167,7 @@ public class MemoryDecider implements PhysicalPlanResolver {
         }
 
         Comparator<MapJoinOperator> comp = new Comparator<MapJoinOperator>() {
+            @Override
             public int compare(MapJoinOperator mj1, MapJoinOperator mj2) {
               if (mj1 == null || mj2 == null) {
                 throw new NullPointerException();

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/SerializeFilter.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/SerializeFilter.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/SerializeFilter.java
index dc433fe..6a0ca5d 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/SerializeFilter.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/SerializeFilter.java
@@ -26,8 +26,8 @@ import java.util.Map;
 import java.util.Set;
 import java.util.Stack;
 
-import org.apache.hadoop.hive.ql.exec.SerializationUtilities;
 import org.apache.hadoop.hive.ql.exec.StatsTask;
+import org.apache.hadoop.hive.ql.exec.SerializationUtilities;
 import org.apache.hadoop.hive.ql.exec.TableScanOperator;
 import org.apache.hadoop.hive.ql.exec.Task;
 import org.apache.hadoop.hive.ql.exec.tez.TezTask;

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/SkewJoinResolver.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/SkewJoinResolver.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/SkewJoinResolver.java
index 2f9783e..e1df60b 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/SkewJoinResolver.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/SkewJoinResolver.java
@@ -86,7 +86,9 @@ public class SkewJoinResolver implements PhysicalPlanResolver {
       ParseContext pc = physicalContext.getParseContext();
       if (pc.getLoadTableWork() != null) {
         for (LoadTableDesc ltd : pc.getLoadTableWork()) {
-          if (!ltd.isMmTable()) continue;
+          if (!ltd.isMmTable()) {
+            continue;
+          }
           // See the path in FSOP that calls fs.exists on finalPath.
           LOG.debug("Not using skew join because the destination table "
               + ltd.getTable().getTableName() + " is an insert_only table");
@@ -95,9 +97,10 @@ public class SkewJoinResolver implements PhysicalPlanResolver {
       }
       if (pc.getLoadFileWork() != null) {
         for (LoadFileDesc lfd : pc.getLoadFileWork()) {
-          if (!lfd.isMmCtas()) continue;
-          LOG.debug("Not using skew join because the destination table "
-              + lfd.getDestinationCreateTable() + " is an insert_only table");
+          if (!lfd.isMmCtas()) {
+            continue;
+          }
+          LOG.debug("Not using skew join because the destination table is an insert_only table");
           return null;
         }
       }

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java
index 0f7ef8b..e9ae590 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java
@@ -81,7 +81,6 @@ import org.apache.hadoop.hive.ql.plan.PlanUtils;
 import org.apache.hadoop.hive.ql.plan.TableDesc;
 import org.apache.hadoop.hive.ql.session.SessionState;
 import org.apache.hadoop.hive.ql.session.SessionState.LogHelper;
-import org.apache.hadoop.hive.ql.stats.StatsUtils;
 import org.apache.hadoop.hive.serde.serdeConstants;
 import org.apache.hadoop.hive.serde2.io.DateWritable;
 import org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe;
@@ -369,10 +368,10 @@ public abstract class BaseSemanticAnalyzer {
       String dbName = dbTablePair.getKey();
       String tableName = dbTablePair.getValue();
       if (dbName != null){
-        return StatsUtils.getFullyQualifiedTableName(dbName, tableName);
+        return dbName + "." + tableName;
       }
       if (currentDatabase != null) {
-        return StatsUtils.getFullyQualifiedTableName(currentDatabase, tableName);
+        return currentDatabase + "." + tableName;
       }
       return tableName;
     } else if (tokenType == HiveParser.StringLiteral) {
@@ -1120,19 +1119,22 @@ public abstract class BaseSemanticAnalyzer {
 
     public TableSpec(Table table) {
       tableHandle = table;
-      tableName = table.getFullyQualifiedName();
+      tableName = table.getDbName() + "." + table.getTableName();
       specType = SpecType.TABLE_ONLY;
     }
 
     public TableSpec(Hive db, String tableName, Map<String, String> partSpec)
         throws HiveException {
       Table table = db.getTable(tableName);
-      final Partition partition = partSpec == null ? null : db.getPartition(table, partSpec, false);
       tableHandle = table;
-      this.tableName = table.getFullyQualifiedName();
-      if (partition == null) {
+      this.tableName = table.getDbName() + "." + table.getTableName();
+      if (partSpec == null) {
         specType = SpecType.TABLE_ONLY;
       } else {
+        Partition partition = db.getPartition(table, partSpec, false);
+        if (partition == null) {
+          throw new SemanticException("partition is unknown: " + table + "/" + partSpec);
+        }
         partHandle = partition;
         partitions = Collections.singletonList(partHandle);
         specType = SpecType.STATIC_PARTITION;
@@ -1733,7 +1735,9 @@ public abstract class BaseSemanticAnalyzer {
   @VisibleForTesting
   static void normalizeColSpec(Map<String, String> partSpec, String colName,
       String colType, String originalColSpec, Object colValue) throws SemanticException {
-    if (colValue == null) return; // nothing to do with nulls
+    if (colValue == null) {
+      return; // nothing to do with nulls
+    }
     String normalizedColSpec = originalColSpec;
     if (colType.equals(serdeConstants.DATE_TYPE_NAME)) {
       normalizedColSpec = normalizeDateCol(colValue, originalColSpec);

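One behavioral change hides in the TableSpec hunk above: an unknown static partition spec used to degrade silently to a table-only spec (getPartition returned null), while it now raises a SemanticException. A minimal sketch, with the database handle and partition spec assumed from the caller and a hypothetical table name:

import java.util.Map;

import org.apache.hadoop.hive.ql.metadata.Hive;
import org.apache.hadoop.hive.ql.metadata.HiveException;
import org.apache.hadoop.hive.ql.parse.BaseSemanticAnalyzer.TableSpec;

final class TableSpecSketch {
  static TableSpec resolve(Hive db, Map<String, String> partSpec) throws HiveException {
    // partSpec == null         -> SpecType.TABLE_ONLY, no partition lookup at all
    // unknown partition values -> SemanticException("partition is unknown: ...")
    return new TableSpec(db, "default.p1", partSpec); // "default.p1" is a placeholder name
  }
}
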
http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java
index 3415a23..29b904e 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java
@@ -93,8 +93,10 @@ import org.apache.hadoop.hive.ql.plan.AlterTableDesc.AlterTableTypes;
 import org.apache.hadoop.hive.ql.plan.AlterTableExchangePartition;
 import org.apache.hadoop.hive.ql.plan.AlterTableSimpleDesc;
 import org.apache.hadoop.hive.ql.plan.AlterWMTriggerDesc;
+import org.apache.hadoop.hive.ql.plan.BasicStatsWork;
 import org.apache.hadoop.hive.ql.plan.CacheMetadataDesc;
 import org.apache.hadoop.hive.ql.plan.ColumnStatsUpdateWork;
+import org.apache.hadoop.hive.ql.plan.StatsWork;
 import org.apache.hadoop.hive.ql.plan.CreateDatabaseDesc;
 import org.apache.hadoop.hive.ql.plan.CreateIndexDesc;
 import org.apache.hadoop.hive.ql.plan.CreateResourcePlanDesc;
@@ -140,7 +142,6 @@ import org.apache.hadoop.hive.ql.plan.ShowTableStatusDesc;
 import org.apache.hadoop.hive.ql.plan.ShowTablesDesc;
 import org.apache.hadoop.hive.ql.plan.ShowTblPropertiesDesc;
 import org.apache.hadoop.hive.ql.plan.ShowTxnsDesc;
-import org.apache.hadoop.hive.ql.plan.StatsWork;
 import org.apache.hadoop.hive.ql.plan.SwitchDatabaseDesc;
 import org.apache.hadoop.hive.ql.plan.TableDesc;
 import org.apache.hadoop.hive.ql.plan.TruncateTableDesc;
@@ -1313,18 +1314,19 @@ public class DDLSemanticAnalyzer extends BaseSemanticAnalyzer {
 
         // Recalculate the HDFS stats if auto gather stats is set
         if (conf.getBoolVar(HiveConf.ConfVars.HIVESTATSAUTOGATHER)) {
-          StatsWork statDesc;
+          BasicStatsWork basicStatsWork;
           if (oldTblPartLoc.equals(newTblPartLoc)) {
             // If we're merging to the same location, we can avoid some metastore calls
             TableSpec tablepart = new TableSpec(this.db, conf, root);
-            statDesc = new StatsWork(tablepart);
+            basicStatsWork = new BasicStatsWork(tablepart);
           } else {
-            statDesc = new StatsWork(ltd);
+            basicStatsWork = new BasicStatsWork(ltd);
           }
-          statDesc.setNoStatsAggregator(true);
-          statDesc.setClearAggregatorStats(true);
-          statDesc.setStatsReliable(conf.getBoolVar(HiveConf.ConfVars.HIVE_STATS_RELIABLE));
-          Task<? extends Serializable> statTask = TaskFactory.get(statDesc, conf);
+          basicStatsWork.setNoStatsAggregator(true);
+          basicStatsWork.setClearAggregatorStats(true);
+          StatsWork columnStatsWork = new StatsWork(table, basicStatsWork, conf);
+
+          Task<? extends Serializable> statTask = TaskFactory.get(columnStatsWork, conf);
           moveTsk.addDependentTask(statTask);
         }
       } catch (HiveException e) {
@@ -1657,7 +1659,7 @@ public class DDLSemanticAnalyzer extends BaseSemanticAnalyzer {
     alterTblDesc.setEnvironmentContext(environmentContext);
     alterTblDesc.setOldName(tableName);
 
-    boolean isPotentialMmSwitch = mapProp.containsKey(hive_metastoreConstants.TABLE_IS_TRANSACTIONAL)
+    boolean isPotentialMmSwitch = AcidUtils.isTablePropertyTransactional(mapProp)
         || mapProp.containsKey(hive_metastoreConstants.TABLE_TRANSACTIONAL_PROPERTIES);
     addInputsOutputsAlterTable(tableName, partSpec, alterTblDesc, isPotentialMmSwitch);
 
@@ -1972,18 +1974,19 @@ public class DDLSemanticAnalyzer extends BaseSemanticAnalyzer {
       mergeTask.addDependentTask(moveTsk);
 
       if (conf.getBoolVar(HiveConf.ConfVars.HIVESTATSAUTOGATHER)) {
-        StatsWork statDesc;
+        BasicStatsWork basicStatsWork;
         if (oldTblPartLoc.equals(newTblPartLoc)) {
           // If we're merging to the same location, we can avoid some metastore calls
           TableSpec tableSpec = new TableSpec(db, tableName, partSpec);
-          statDesc = new StatsWork(tableSpec);
+          basicStatsWork = new BasicStatsWork(tableSpec);
         } else {
-          statDesc = new StatsWork(ltd);
+          basicStatsWork = new BasicStatsWork(ltd);
         }
-        statDesc.setNoStatsAggregator(true);
-        statDesc.setClearAggregatorStats(true);
-        statDesc.setStatsReliable(conf.getBoolVar(HiveConf.ConfVars.HIVE_STATS_RELIABLE));
-        Task<? extends Serializable> statTask = TaskFactory.get(statDesc, conf);
+        basicStatsWork.setNoStatsAggregator(true);
+        basicStatsWork.setClearAggregatorStats(true);
+        StatsWork columnStatsWork = new StatsWork(tblObj, basicStatsWork, conf);
+
+        Task<? extends Serializable> statTask = TaskFactory.get(columnStatsWork, conf);
         moveTsk.addDependentTask(statTask);
       }
 
@@ -2085,7 +2088,7 @@ public class DDLSemanticAnalyzer extends BaseSemanticAnalyzer {
     switch (child.getToken().getType()) {
       case HiveParser.TOK_UNIQUE:
         BaseSemanticAnalyzer.processUniqueConstraints(qualifiedTabName[0], qualifiedTabName[1],
-                child, uniqueConstraints);        
+                child, uniqueConstraints);
         break;
       case HiveParser.TOK_PRIMARY_KEY:
         BaseSemanticAnalyzer.processPrimaryKeys(qualifiedTabName[0], qualifiedTabName[1],

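Both stats-recompute hunks above (the truncate-merge path and ALTER TABLE ... CONCATENATE) share one pattern: no query produced aggregator output, so the basic-stats work runs with the aggregator disabled and stale aggregator state cleared before being wrapped in the unified StatsWork. A sketch with the names taken from the patch, where sameLocation stands in for the oldTblPartLoc.equals(newTblPartLoc) check:

import java.io.Serializable;

import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.ql.exec.Task;
import org.apache.hadoop.hive.ql.exec.TaskFactory;
import org.apache.hadoop.hive.ql.metadata.Table;
import org.apache.hadoop.hive.ql.parse.BaseSemanticAnalyzer.TableSpec;
import org.apache.hadoop.hive.ql.plan.BasicStatsWork;
import org.apache.hadoop.hive.ql.plan.LoadTableDesc;
import org.apache.hadoop.hive.ql.plan.StatsWork;

final class DdlStatsRecomputeSketch {
  static Task<? extends Serializable> statsTask(boolean sameLocation, TableSpec tableSpec,
      LoadTableDesc ltd, Table table, HiveConf conf) {
    BasicStatsWork basicStatsWork = sameLocation
        ? new BasicStatsWork(tableSpec) // same target location: avoids extra metastore calls
        : new BasicStatsWork(ltd);
    basicStatsWork.setNoStatsAggregator(true);    // no query ran, nothing to aggregate
    basicStatsWork.setClearAggregatorStats(true); // wipe any stale aggregator state
    StatsWork columnStatsWork = new StatsWork(table, basicStatsWork, conf);
    return TaskFactory.get(columnStatsWork, conf);
  }
}
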
http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/java/org/apache/hadoop/hive/ql/parse/ExplainSemanticAnalyzer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/ExplainSemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/ExplainSemanticAnalyzer.java
index 7a0d4a7..065c7e5 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/ExplainSemanticAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/ExplainSemanticAnalyzer.java
@@ -26,7 +26,6 @@ import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 
-import org.antlr.runtime.TokenRewriteStream;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -40,17 +39,12 @@ import org.apache.hadoop.hive.ql.Driver;
 import org.apache.hadoop.hive.ql.QueryState;
 import org.apache.hadoop.hive.ql.exec.ExplainTask;
 import org.apache.hadoop.hive.ql.exec.FetchTask;
-import org.apache.hadoop.hive.ql.exec.StatsTask;
 import org.apache.hadoop.hive.ql.exec.Task;
 import org.apache.hadoop.hive.ql.exec.TaskFactory;
-import org.apache.hadoop.hive.ql.metadata.HiveException;
 import org.apache.hadoop.hive.ql.parse.ExplainConfiguration.AnalyzeState;
 import org.apache.hadoop.hive.ql.parse.ExplainConfiguration.VectorizationDetailLevel;
 import org.apache.hadoop.hive.ql.plan.ExplainWork;
-import org.apache.hadoop.hive.ql.processors.CommandProcessor;
-import org.apache.hadoop.hive.ql.processors.CommandProcessorFactory;
 import org.apache.hadoop.hive.ql.processors.CommandProcessorResponse;
-import org.apache.hadoop.hive.ql.session.SessionState;
 import org.apache.hadoop.hive.ql.stats.StatsAggregator;
 import org.apache.hadoop.hive.ql.stats.StatsCollectionContext;
 import org.apache.hadoop.hive.ql.stats.fs.FSStatsAggregator;

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/java/org/apache/hadoop/hive/ql/parse/GenTezUtils.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/GenTezUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/GenTezUtils.java
index 01cb2b3..b6f1139 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/GenTezUtils.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/GenTezUtils.java
@@ -205,10 +205,6 @@ public class GenTezUtils {
     // All the setup is done in GenMapRedUtils
     GenMapRedUtils.setMapWork(mapWork, context.parseContext,
         context.inputs, partitions, root, alias, context.conf, false);
-    // we also collect table stats while collecting column stats.
-    if (context.parseContext.getAnalyzeRewrite() != null) {
-      mapWork.setGatheringStats(true);
-    }
   }
 
   // removes any union operator and clones the plan

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java
index 1318c18..cd75130 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java
@@ -238,7 +238,7 @@ public class ImportSemanticAnalyzer extends BaseSemanticAnalyzer {
 
     if ((replicationSpec != null) && replicationSpec.isInReplicationScope()){
       tblDesc.setReplicationSpec(replicationSpec);
-      tblDesc.getTblProps().remove(StatsSetupConst.COLUMN_STATS_ACCURATE);
+      StatsSetupConst.setBasicStatsState(tblDesc.getTblProps(), StatsSetupConst.FALSE);
     }
 
     if (isExternalSet){
@@ -266,7 +266,7 @@ public class ImportSemanticAnalyzer extends BaseSemanticAnalyzer {
       // TODO: this should ideally not create AddPartitionDesc per partition
       AddPartitionDesc partsDesc = getBaseAddPartitionDescFromPartition(fromPath, dbname, tblDesc, partition);
       if ((replicationSpec != null) && replicationSpec.isInReplicationScope()){
-        partsDesc.getPartition(0).getPartParams().remove(StatsSetupConst.COLUMN_STATS_ACCURATE);
+        StatsSetupConst.setBasicStatsState(partsDesc.getPartition(0).getPartParams(), StatsSetupConst.FALSE);
       }
       partitionDescs.add(partsDesc);
     }
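
For context, the two replication-scope hunks above replace a raw property removal
with an explicit state transition. A minimal stand-alone sketch of the difference,
assuming only hive-common on the classpath (the HashMap stands in for the table's
parameter map from the diff):

    import java.util.HashMap;
    import java.util.Map;

    import org.apache.hadoop.hive.common.StatsSetupConst;

    public class BasicStatsStateDemo {
      public static void main(String[] args) {
        Map<String, String> tblProps = new HashMap<>();
        // Old behaviour: silently drop the accuracy marker.
        tblProps.remove(StatsSetupConst.COLUMN_STATS_ACCURATE);
        // New behaviour: record explicitly that basic stats are not accurate,
        // so every consumer of the property sees one well-formed state.
        StatsSetupConst.setBasicStatsState(tblProps, StatsSetupConst.FALSE);
        System.out.println(tblProps);
      }
    }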

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/java/org/apache/hadoop/hive/ql/parse/LoadSemanticAnalyzer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/LoadSemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/LoadSemanticAnalyzer.java
index 5f2a34e..238fbd6 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/LoadSemanticAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/LoadSemanticAnalyzer.java
@@ -19,7 +19,6 @@
 package org.apache.hadoop.hive.ql.parse;
 
 import org.apache.hadoop.hive.conf.HiveConf.StrictChecks;
-
 import java.io.IOException;
 import java.io.Serializable;
 import java.net.URI;
@@ -36,7 +35,6 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.PathFilter;
 import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.metastore.MetaStoreUtils;
 import org.apache.hadoop.hive.metastore.TableType;
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
 import org.apache.hadoop.hive.ql.ErrorMsg;
@@ -50,11 +48,12 @@ import org.apache.hadoop.hive.ql.io.HiveFileFormatUtils;
 import org.apache.hadoop.hive.ql.metadata.Hive;
 import org.apache.hadoop.hive.ql.metadata.HiveException;
 import org.apache.hadoop.hive.ql.metadata.Partition;
+import org.apache.hadoop.hive.ql.plan.StatsWork;
 import org.apache.hadoop.hive.ql.metadata.Table;
 import org.apache.hadoop.hive.ql.plan.LoadTableDesc;
 import org.apache.hadoop.hive.ql.plan.LoadTableDesc.LoadFileType;
 import org.apache.hadoop.hive.ql.plan.MoveWork;
-import org.apache.hadoop.hive.ql.plan.StatsWork;
+import org.apache.hadoop.hive.ql.plan.BasicStatsWork;
 import org.apache.hadoop.hive.ql.session.SessionState;
 import org.apache.hadoop.mapred.InputFormat;
 
@@ -225,8 +224,10 @@ public class LoadSemanticAnalyzer extends BaseSemanticAnalyzer {
     List<String> bucketCols = ts.tableHandle.getBucketCols();
     if (bucketCols != null && !bucketCols.isEmpty()) {
       String error = StrictChecks.checkBucketing(conf);
-      if (error != null) throw new SemanticException("Please load into an intermediate table"
-          + " and use 'insert... select' to allow Hive to enforce bucketing. " + error);
+      if (error != null) {
+        throw new SemanticException("Please load into an intermediate table"
+            + " and use 'insert... select' to allow Hive to enforce bucketing. " + error);
+      }
     }
 
     if(AcidUtils.isAcidTable(ts.tableHandle) && !AcidUtils.isInsertOnlyTable(ts.tableHandle.getParameters())) {
@@ -313,11 +314,11 @@ public class LoadSemanticAnalyzer extends BaseSemanticAnalyzer {
     // Update the stats which do not require a complete scan.
     Task<? extends Serializable> statTask = null;
     if (conf.getBoolVar(HiveConf.ConfVars.HIVESTATSAUTOGATHER)) {
-      StatsWork statDesc = new StatsWork(loadTableWork);
-      statDesc.setNoStatsAggregator(true);
-      statDesc.setClearAggregatorStats(true);
-      statDesc.setStatsReliable(conf.getBoolVar(HiveConf.ConfVars.HIVE_STATS_RELIABLE));
-      statTask = TaskFactory.get(statDesc, conf);
+      BasicStatsWork basicStatsWork = new BasicStatsWork(loadTableWork);
+      basicStatsWork.setNoStatsAggregator(true);
+      basicStatsWork.setClearAggregatorStats(true);
+      StatsWork columnStatsWork = new StatsWork(ts.tableHandle, basicStatsWork, conf);
+      statTask = TaskFactory.get(columnStatsWork, conf);
     }
 
     // HIVE-3334 has been filed for load file with index auto update
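
The hunk above shows the pattern this patch applies everywhere: a BasicStatsWork
describes what to gather, and a StatsWork wraps it so a single StatsTask can carry
basic and column stats together. A condensed sketch of the LOAD wiring, assuming
the enclosing method's loadTableWork, ts and conf; only calls visible in the diff
are used:

    // LOAD path: no runtime stats are published, so there is nothing to aggregate
    // and any previously aggregated stats for the target are no longer valid.
    BasicStatsWork basicStatsWork = new BasicStatsWork(loadTableWork);
    basicStatsWork.setNoStatsAggregator(true);
    basicStatsWork.setClearAggregatorStats(true);
    // One StatsWork per target table; the resulting task is the unified StatsTask.
    StatsWork statsWork = new StatsWork(ts.tableHandle, basicStatsWork, conf);
    Task<? extends Serializable> statTask = TaskFactory.get(statsWork, conf);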

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/java/org/apache/hadoop/hive/ql/parse/ProcessAnalyzeTable.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/ProcessAnalyzeTable.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/ProcessAnalyzeTable.java
index 9309fbd..1c2ad7b 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/ProcessAnalyzeTable.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/ProcessAnalyzeTable.java
@@ -18,7 +18,6 @@
 
 package org.apache.hadoop.hive.ql.parse;
 
-import java.util.ArrayList;
 import java.util.List;
 import java.util.Set;
 import java.util.Stack;
@@ -26,30 +25,25 @@ import java.util.Stack;
 import org.apache.hadoop.hive.ql.io.parquet.MapredParquetInputFormat;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
-import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.ql.exec.TableScanOperator;
 import org.apache.hadoop.hive.ql.exec.Task;
 import org.apache.hadoop.hive.ql.exec.TaskFactory;
-import org.apache.hadoop.hive.ql.exec.Utilities;
-import org.apache.hadoop.hive.ql.hooks.WriteEntity;
 import org.apache.hadoop.hive.ql.io.orc.OrcInputFormat;
 import org.apache.hadoop.hive.ql.lib.Node;
 import org.apache.hadoop.hive.ql.lib.NodeProcessor;
 import org.apache.hadoop.hive.ql.lib.NodeProcessorCtx;
-import org.apache.hadoop.hive.ql.metadata.HiveException;
 import org.apache.hadoop.hive.ql.metadata.Partition;
 import org.apache.hadoop.hive.ql.metadata.Table;
 import org.apache.hadoop.hive.ql.optimizer.GenMapRedUtils;
-import org.apache.hadoop.hive.ql.parse.BaseSemanticAnalyzer.TableSpec;
-import org.apache.hadoop.hive.ql.plan.MapWork;
-import org.apache.hadoop.hive.ql.plan.StatsNoJobWork;
 import org.apache.hadoop.hive.ql.plan.StatsWork;
+import org.apache.hadoop.hive.ql.plan.MapWork;
+import org.apache.hadoop.hive.ql.plan.BasicStatsWork;
 import org.apache.hadoop.hive.ql.plan.TezWork;
 import org.apache.hadoop.mapred.InputFormat;
 
 /**
  * ProcessAnalyzeTable sets up work for the several variants of analyze table
- * (normal, no scan, partial scan.) The plan at this point will be a single
+ * (normal, no scan). The plan at this point will be a single
  * table scan operator.
  */
 public class ProcessAnalyzeTable implements NodeProcessor {
@@ -76,8 +70,8 @@ public class ProcessAnalyzeTable implements NodeProcessor {
     TableScanOperator tableScan = (TableScanOperator) nd;
 
     ParseContext parseContext = context.parseContext;
-    Class<? extends InputFormat> inputFormat = tableScan.getConf().getTableMetadata()
-        .getInputFormatClass();
+    Table table = tableScan.getConf().getTableMetadata();
+    Class<? extends InputFormat> inputFormat = table.getInputFormatClass();
 
     if (parseContext.getQueryProperties().isAnalyzeCommand()) {
 
@@ -99,20 +93,17 @@ public class ProcessAnalyzeTable implements NodeProcessor {
         // ANALYZE TABLE T [PARTITION (...)] COMPUTE STATISTICS noscan;
 
         // There will not be any Tez job above this task
-        StatsNoJobWork snjWork = new StatsNoJobWork(tableScan.getConf().getTableMetadata()
-            .getTableSpec());
-        snjWork.setStatsReliable(parseContext.getConf().getBoolVar(
-            HiveConf.ConfVars.HIVE_STATS_RELIABLE));
+        StatsWork statWork = new StatsWork(table, parseContext.getConf());
+        statWork.setFooterScan();
+
         // If partition is specified, get pruned partition list
         Set<Partition> confirmedParts = GenMapRedUtils.getConfirmedPartitionsForScan(tableScan);
         if (confirmedParts.size() > 0) {
-          Table source = tableScan.getConf().getTableMetadata();
           List<String> partCols = GenMapRedUtils.getPartitionColumns(tableScan);
-          PrunedPartitionList partList = new PrunedPartitionList(source, confirmedParts, partCols,
-              false);
-          snjWork.setPrunedPartitionList(partList);
+          PrunedPartitionList partList = new PrunedPartitionList(table, confirmedParts, partCols, false);
+          statWork.addInputPartitions(partList.getPartitions());
         }
-        Task<StatsNoJobWork> snjTask = TaskFactory.get(snjWork, parseContext.getConf());
+        Task<StatsWork> snjTask = TaskFactory.get(statWork, parseContext.getConf());
         snjTask.setParentTasks(null);
         context.rootTasks.remove(context.currentTask);
         context.rootTasks.add(snjTask);
@@ -123,20 +114,19 @@ public class ProcessAnalyzeTable implements NodeProcessor {
         // The plan consists of a simple TezTask followed by a StatsTask.
         // The Tez task is just a simple TableScanOperator
 
-        StatsWork statsWork = new StatsWork(tableScan.getConf().getTableMetadata().getTableSpec());
-        statsWork.setAggKey(tableScan.getConf().getStatsAggPrefix());
-        statsWork.setStatsTmpDir(tableScan.getConf().getTmpStatsDir());
-        statsWork.setSourceTask(context.currentTask);
-        statsWork.setStatsReliable(parseContext.getConf().getBoolVar(
-            HiveConf.ConfVars.HIVE_STATS_RELIABLE));
-        Task<StatsWork> statsTask = TaskFactory.get(statsWork, parseContext.getConf());
+        BasicStatsWork basicStatsWork = new BasicStatsWork(table.getTableSpec());
+        basicStatsWork.setNoScanAnalyzeCommand(parseContext.getQueryProperties().isNoScanAnalyzeCommand());
+        StatsWork columnStatsWork = new StatsWork(table, basicStatsWork, parseContext.getConf());
+        columnStatsWork.collectStatsFromAggregator(tableScan.getConf());
+
+        columnStatsWork.setSourceTask(context.currentTask);
+        Task<StatsWork> statsTask = TaskFactory.get(columnStatsWork, parseContext.getConf());
         context.currentTask.addDependentTask(statsTask);
 
         // ANALYZE TABLE T [PARTITION (...)] COMPUTE STATISTICS noscan;
         // The plan consists of a StatsTask only.
         if (parseContext.getQueryProperties().isNoScanAnalyzeCommand()) {
           statsTask.setParentTasks(null);
-          statsWork.setNoScanAnalyzeCommand(true);
           context.rootTasks.remove(context.currentTask);
           context.rootTasks.add(statsTask);
         }
@@ -147,9 +137,8 @@ public class ProcessAnalyzeTable implements NodeProcessor {
         Set<Partition> confirmedPartns = GenMapRedUtils.getConfirmedPartitionsForScan(tableScan);
         PrunedPartitionList partitions = null;
         if (confirmedPartns.size() > 0) {
-          Table source = tableScan.getConf().getTableMetadata();
           List<String> partCols = GenMapRedUtils.getPartitionColumns(tableScan);
-          partitions = new PrunedPartitionList(source, confirmedPartns, partCols, false);
+          partitions = new PrunedPartitionList(table, confirmedPartns, partCols, false);
         }
 
         MapWork w = utils.createMapWork(context, tableScan, tezWork, partitions);
@@ -157,55 +146,8 @@ public class ProcessAnalyzeTable implements NodeProcessor {
 
         return true;
       }
-    } else if (parseContext.getAnalyzeRewrite() != null) {
-      // we need to collect table stats while collecting column stats.
-      try {
-        context.currentTask.addDependentTask(genTableStats(context, tableScan));
-      } catch (HiveException e) {
-        throw new SemanticException(e);
-      }
     }
 
     return null;
   }
-
-  private Task<?> genTableStats(GenTezProcContext context, TableScanOperator tableScan)
-      throws HiveException {
-    Class<? extends InputFormat> inputFormat = tableScan.getConf().getTableMetadata()
-        .getInputFormatClass();
-    ParseContext parseContext = context.parseContext;
-    Table table = tableScan.getConf().getTableMetadata();
-    List<Partition> partitions = new ArrayList<>();
-    if (table.isPartitioned()) {
-      partitions.addAll(parseContext.getPrunedPartitions(tableScan).getPartitions());
-      for (Partition partn : partitions) {
-        LOG.debug("XXX: adding part: " + partn);
-        context.outputs.add(new WriteEntity(partn, WriteEntity.WriteType.DDL_NO_LOCK));
-      }
-    }
-    TableSpec tableSpec = new TableSpec(table, partitions);
-    tableScan.getConf().getTableMetadata().setTableSpec(tableSpec);
-
-    if (inputFormat.equals(OrcInputFormat.class)) {
-      // For ORC, there is no Tez Job for table stats.
-      StatsNoJobWork snjWork = new StatsNoJobWork(tableScan.getConf().getTableMetadata()
-          .getTableSpec());
-      snjWork.setStatsReliable(parseContext.getConf().getBoolVar(
-          HiveConf.ConfVars.HIVE_STATS_RELIABLE));
-      // If partition is specified, get pruned partition list
-      if (partitions.size() > 0) {
-        snjWork.setPrunedPartitionList(parseContext.getPrunedPartitions(tableScan));
-      }
-      return TaskFactory.get(snjWork, parseContext.getConf());
-    } else {
-
-      StatsWork statsWork = new StatsWork(tableScan.getConf().getTableMetadata().getTableSpec());
-      statsWork.setAggKey(tableScan.getConf().getStatsAggPrefix());
-      statsWork.setStatsTmpDir(tableScan.getConf().getTmpStatsDir());
-      statsWork.setSourceTask(context.currentTask);
-      statsWork.setStatsReliable(parseContext.getConf().getBoolVar(
-          HiveConf.ConfVars.HIVE_STATS_RELIABLE));
-      return TaskFactory.get(statsWork, parseContext.getConf());
-    }
-  }
 }
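
After this change ProcessAnalyzeTable builds one of two shapes of work. A hedged
sketch of both branches, with variable names taken from the diff and the branch
condition simplified to an illustrative boolean (canReadStatsFromFooters is not a
real field):

    if (canReadStatsFromFooters) {
      // ORC/Parquet noscan-style path: no Tez job at all, the footer-scan
      // StatsWork becomes a root task.
      StatsWork statWork = new StatsWork(table, parseContext.getConf());
      statWork.setFooterScan();
      Task<StatsWork> statsTask = TaskFactory.get(statWork, parseContext.getConf());
      statsTask.setParentTasks(null);
    } else {
      // General path: a TableScan job publishes to the stats aggregator and a
      // single downstream StatsTask consumes it.
      BasicStatsWork basic = new BasicStatsWork(table.getTableSpec());
      basic.setNoScanAnalyzeCommand(parseContext.getQueryProperties().isNoScanAnalyzeCommand());
      StatsWork statsWork = new StatsWork(table, basic, parseContext.getConf());
      statsWork.collectStatsFromAggregator(tableScan.getConf());
      statsWork.setSourceTask(context.currentTask);
      context.currentTask.addDependentTask(TaskFactory.get(statsWork, parseContext.getConf()));
    }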

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
index 7a7460e..68240f0 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
@@ -7150,7 +7150,8 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
     // the following code is used to collect column stats when
     // hive.stats.autogather=true
     // and it is an insert overwrite or insert into table
-    if (dest_tab != null && conf.getBoolVar(ConfVars.HIVESTATSAUTOGATHER)
+    if (dest_tab != null
+        && conf.getBoolVar(ConfVars.HIVESTATSAUTOGATHER)
         && conf.getBoolVar(ConfVars.HIVESTATSCOLAUTOGATHER)
         && ColumnStatsAutoGatherContext.canRunAutogatherStats(fso)) {
       if (dest_type.intValue() == QBMetaData.DEST_TABLE) {
@@ -10655,10 +10656,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
       throws SemanticException {
 
     // if it is not analyze command and not column stats, then do not gatherstats
-    // if it is column stats, but it is not tez, do not gatherstats
-    if ((!qbp.isAnalyzeCommand() && qbp.getAnalyzeRewrite() == null)
-        || (qbp.getAnalyzeRewrite() != null && !HiveConf.getVar(conf,
-            HiveConf.ConfVars.HIVE_EXECUTION_ENGINE).equals("tez"))) {
+    if (!qbp.isAnalyzeCommand() && qbp.getAnalyzeRewrite() == null) {
       tsDesc.setGatherStats(false);
     } else {
       if (HiveConf.getVar(conf, HIVESTATSDBCLASS).equalsIgnoreCase(StatDB.fs.name())) {

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/java/org/apache/hadoop/hive/ql/parse/TaskCompiler.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/TaskCompiler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/TaskCompiler.java
index a63f709..7b29370 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/TaskCompiler.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/TaskCompiler.java
@@ -22,8 +22,10 @@ import java.io.Serializable;
 import java.util.ArrayList;
 import java.util.HashSet;
 import java.util.Iterator;
+import java.util.LinkedHashMap;
 import java.util.LinkedHashSet;
 import java.util.List;
+import java.util.Map;
 import java.util.Set;
 
 import org.apache.hadoop.hive.ql.io.AcidUtils;
@@ -39,9 +41,9 @@ import org.apache.hadoop.hive.metastore.api.MetaException;
 import org.apache.hadoop.hive.ql.Context;
 import org.apache.hadoop.hive.ql.ErrorMsg;
 import org.apache.hadoop.hive.ql.QueryState;
-import org.apache.hadoop.hive.ql.exec.ColumnStatsTask;
 import org.apache.hadoop.hive.ql.exec.FetchTask;
 import org.apache.hadoop.hive.ql.exec.StatsTask;
+import org.apache.hadoop.hive.ql.exec.TableScanOperator;
 import org.apache.hadoop.hive.ql.exec.Task;
 import org.apache.hadoop.hive.ql.exec.TaskFactory;
 import org.apache.hadoop.hive.ql.exec.Utilities;
@@ -49,12 +51,16 @@ import org.apache.hadoop.hive.ql.exec.mr.ExecDriver;
 import org.apache.hadoop.hive.ql.exec.spark.SparkTask;
 import org.apache.hadoop.hive.ql.hooks.ReadEntity;
 import org.apache.hadoop.hive.ql.hooks.WriteEntity;
+import org.apache.hadoop.hive.ql.io.orc.OrcInputFormat;
 import org.apache.hadoop.hive.ql.metadata.Hive;
 import org.apache.hadoop.hive.ql.metadata.HiveException;
+import org.apache.hadoop.hive.ql.metadata.Partition;
+import org.apache.hadoop.hive.ql.metadata.Table;
 import org.apache.hadoop.hive.ql.optimizer.GenMapRedUtils;
 import org.apache.hadoop.hive.ql.parse.BaseSemanticAnalyzer.AnalyzeRewriteContext;
+import org.apache.hadoop.hive.ql.parse.BaseSemanticAnalyzer.TableSpec;
+import org.apache.hadoop.hive.ql.plan.BasicStatsWork;
 import org.apache.hadoop.hive.ql.plan.ColumnStatsDesc;
-import org.apache.hadoop.hive.ql.plan.ColumnStatsWork;
 import org.apache.hadoop.hive.ql.plan.CreateTableDesc;
 import org.apache.hadoop.hive.ql.plan.CreateViewDesc;
 import org.apache.hadoop.hive.ql.plan.DDLWork;
@@ -64,6 +70,7 @@ import org.apache.hadoop.hive.ql.plan.LoadFileDesc;
 import org.apache.hadoop.hive.ql.plan.LoadTableDesc;
 import org.apache.hadoop.hive.ql.plan.MoveWork;
 import org.apache.hadoop.hive.ql.plan.PlanUtils;
+import org.apache.hadoop.hive.ql.plan.StatsWork;
 import org.apache.hadoop.hive.ql.plan.TableDesc;
 import org.apache.hadoop.hive.ql.session.SessionState;
 import org.apache.hadoop.hive.ql.session.SessionState.LogHelper;
@@ -74,16 +81,7 @@ import org.apache.hadoop.hive.serde2.SerDeUtils;
 import org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe;
 import org.apache.hadoop.hive.serde2.thrift.ThriftFormatter;
 import org.apache.hadoop.hive.serde2.thrift.ThriftJDBCBinarySerDe;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.Serializable;
-import java.util.ArrayList;
-import java.util.HashSet;
-import java.util.Iterator;
-import java.util.LinkedHashSet;
-import java.util.List;
-import java.util.Set;
+import org.apache.hadoop.mapred.InputFormat;
 
 /**
  * TaskCompiler is a the base class for classes that compile
@@ -273,9 +271,65 @@ public abstract class TaskCompiler {
     /*
      * If the query was the result of analyze table column compute statistics rewrite, create
      * a column stats task instead of a fetch task to persist stats to the metastore.
+     * As per HIVE-15903, we also collect table stats when the user computes column stats.
+     * That means we need to collect table stats whenever
+     * isCStats || !pCtx.getColumnStatsAutoGatherContexts().isEmpty() holds:
+     * if isCStats, we need to include a basic stats task;
+     * otherwise it is ColumnStatsAutoGather, which already has a move task with a stats task.
      */
     if (isCStats || !pCtx.getColumnStatsAutoGatherContexts().isEmpty()) {
-      createColumnStatsTasks(pCtx, rootTasks, loadFileWork, isCStats, outerQueryLimit);
+      // map from table name to its StatsTask (which covers basic and, optionally, column stats)
+      Map<String, StatsTask> map = new LinkedHashMap<>();
+      if (isCStats) {
+        if (rootTasks == null || rootTasks.size() != 1 || pCtx.getTopOps() == null
+            || pCtx.getTopOps().size() != 1) {
+          throw new SemanticException("Can not find correct root task!");
+        }
+        try {
+          Task<? extends Serializable> root = rootTasks.iterator().next();
+          StatsTask tsk = (StatsTask) genTableStats(pCtx, pCtx.getTopOps().values()
+              .iterator().next(), root, outputs);
+          root.addDependentTask(tsk);
+          map.put(extractTableFullName(tsk), tsk);
+        } catch (HiveException e) {
+          throw new SemanticException(e);
+        }
+        genColumnStatsTask(pCtx.getAnalyzeRewrite(), loadFileWork, map, outerQueryLimit, 0);
+      } else {
+        Set<Task<? extends Serializable>> leafTasks = new LinkedHashSet<Task<? extends Serializable>>();
+        getLeafTasks(rootTasks, leafTasks);
+        List<Task<? extends Serializable>> nonStatsLeafTasks = new ArrayList<>();
+        for (Task<? extends Serializable> tsk : leafTasks) {
+          // map table name to the correct ColumnStatsTask
+          if (tsk instanceof StatsTask) {
+            map.put(extractTableFullName((StatsTask) tsk), (StatsTask) tsk);
+          } else {
+            nonStatsLeafTasks.add(tsk);
+          }
+        }
+        // add cStatsTask as a dependent of all the nonStatsLeafTasks
+        for (Task<? extends Serializable> tsk : nonStatsLeafTasks) {
+          for (Task<? extends Serializable> cStatsTask : map.values()) {
+            tsk.addDependentTask(cStatsTask);
+          }
+        }
+        for (ColumnStatsAutoGatherContext columnStatsAutoGatherContext : pCtx
+            .getColumnStatsAutoGatherContexts()) {
+          if (!columnStatsAutoGatherContext.isInsertInto()) {
+            genColumnStatsTask(columnStatsAutoGatherContext.getAnalyzeRewrite(),
+                columnStatsAutoGatherContext.getLoadFileWork(), map, outerQueryLimit, 0);
+          } else {
+            int numBitVector;
+            try {
+              numBitVector = HiveStatsUtils.getNumBitVectorsForNDVEstimation(conf);
+            } catch (Exception e) {
+              throw new SemanticException(e.getMessage());
+            }
+            genColumnStatsTask(columnStatsAutoGatherContext.getAnalyzeRewrite(),
+                columnStatsAutoGatherContext.getLoadFileWork(), map, outerQueryLimit, numBitVector);
+          }
+        }
+      }
     }
 
     decideExecMode(rootTasks, ctx, globalLimitCtx);
@@ -322,6 +376,44 @@ public abstract class TaskCompiler {
     }
   }
 
+  private String extractTableFullName(StatsTask tsk) throws SemanticException {
+    return tsk.getWork().getFullTableName();
+  }
+
+  private Task<?> genTableStats(ParseContext parseContext, TableScanOperator tableScan, Task currentTask, final HashSet<WriteEntity> outputs)
+      throws HiveException {
+    Class<? extends InputFormat> inputFormat = tableScan.getConf().getTableMetadata()
+        .getInputFormatClass();
+    Table table = tableScan.getConf().getTableMetadata();
+    List<Partition> partitions = new ArrayList<>();
+    if (table.isPartitioned()) {
+      partitions.addAll(parseContext.getPrunedPartitions(tableScan).getPartitions());
+      for (Partition partn : partitions) {
+        LOG.trace("adding part: " + partn);
+        outputs.add(new WriteEntity(partn, WriteEntity.WriteType.DDL_NO_LOCK));
+      }
+    }
+    TableSpec tableSpec = new TableSpec(table, partitions);
+    tableScan.getConf().getTableMetadata().setTableSpec(tableSpec);
+
+    if (inputFormat.equals(OrcInputFormat.class)) {
+      // For ORC, there is no Tez Job for table stats.
+      StatsWork columnStatsWork = new StatsWork(table, parseContext.getConf());
+      columnStatsWork.setFooterScan();
+      // If partition is specified, get pruned partition list
+      if (partitions.size() > 0) {
+        columnStatsWork.addInputPartitions(parseContext.getPrunedPartitions(tableScan).getPartitions());
+      }
+      return TaskFactory.get(columnStatsWork, parseContext.getConf());
+    } else {
+      BasicStatsWork statsWork = new BasicStatsWork(tableScan.getConf().getTableMetadata().getTableSpec());
+      StatsWork columnStatsWork = new StatsWork(table, statsWork, parseContext.getConf());
+      columnStatsWork.collectStatsFromAggregator(tableScan.getConf());
+      columnStatsWork.setSourceTask(currentTask);
+      return TaskFactory.get(columnStatsWork, parseContext.getConf());
+    }
+  }
+
   private void setLoadFileLocation(
       final ParseContext pCtx, LoadFileDesc lfd) throws SemanticException {
     // CTAS; make the movetask's destination directory the table's destination.
@@ -353,34 +445,6 @@ public abstract class TaskCompiler {
     lfd.setTargetDir(location);
   }
 
-  private void createColumnStatsTasks(final ParseContext pCtx,
-      final List<Task<? extends Serializable>> rootTasks,
-      List<LoadFileDesc> loadFileWork, boolean isCStats, int outerQueryLimit)
-      throws SemanticException {
-    Set<Task<? extends Serializable>> leafTasks = new LinkedHashSet<Task<? extends Serializable>>();
-    getLeafTasks(rootTasks, leafTasks);
-    if (isCStats) {
-      genColumnStatsTask(pCtx.getAnalyzeRewrite(), loadFileWork, leafTasks, outerQueryLimit, 0);
-    } else {
-      for (ColumnStatsAutoGatherContext columnStatsAutoGatherContext : pCtx
-          .getColumnStatsAutoGatherContexts()) {
-        if (!columnStatsAutoGatherContext.isInsertInto()) {
-          genColumnStatsTask(columnStatsAutoGatherContext.getAnalyzeRewrite(),
-              columnStatsAutoGatherContext.getLoadFileWork(), leafTasks, outerQueryLimit, 0);
-        } else {
-          int numBitVector;
-          try {
-            numBitVector = HiveStatsUtils.getNumBitVectorsForNDVEstimation(conf);
-          } catch (Exception e) {
-            throw new SemanticException(e.getMessage());
-          }
-          genColumnStatsTask(columnStatsAutoGatherContext.getAnalyzeRewrite(),
-              columnStatsAutoGatherContext.getLoadFileWork(), leafTasks, outerQueryLimit, numBitVector);
-        }
-      }
-    }
-  }
-
   private Path getDefaultCtasLocation(final ParseContext pCtx) throws SemanticException {
     try {
       String protoName = null;
@@ -419,10 +483,8 @@ public abstract class TaskCompiler {
       }
     }
 
-    // find all leaf tasks and make the DDLTask as a dependent task of all of
-    // them
-    HashSet<Task<? extends Serializable>> leaves =
-        new LinkedHashSet<>();
+    // find all leaf tasks and make the DDLTask a dependent of all of them
+    HashSet<Task<? extends Serializable>> leaves = new LinkedHashSet<>();
     getLeafTasks(rootTasks, leaves);
     assert (leaves.size() > 0);
     for (Task<? extends Serializable> task : leaves) {
@@ -452,10 +514,8 @@ public abstract class TaskCompiler {
    */
   @SuppressWarnings("unchecked")
   protected void genColumnStatsTask(AnalyzeRewriteContext analyzeRewrite,
-      List<LoadFileDesc> loadFileWork, Set<Task<? extends Serializable>> leafTasks,
-      int outerQueryLimit, int numBitVector) {
-    ColumnStatsTask cStatsTask;
-    ColumnStatsWork cStatsWork;
+      List<LoadFileDesc> loadFileWork, Map<String, StatsTask> map,
+      int outerQueryLimit, int numBitVector) throws SemanticException {
     FetchWork fetch;
     String tableName = analyzeRewrite.getTableName();
     List<String> colName = analyzeRewrite.getColName();
@@ -482,11 +542,12 @@ public abstract class TaskCompiler {
     fetch = new FetchWork(loadFileWork.get(0).getSourcePath(), resultTab, outerQueryLimit);
 
     ColumnStatsDesc cStatsDesc = new ColumnStatsDesc(tableName,
-        colName, colType, isTblLevel, numBitVector);
-    cStatsWork = new ColumnStatsWork(fetch, cStatsDesc, SessionState.get().getCurrentDatabase());
-    cStatsTask = (ColumnStatsTask) TaskFactory.get(cStatsWork, conf);
-    for (Task<? extends Serializable> tsk : leafTasks) {
-      tsk.addDependentTask(cStatsTask);
+        colName, colType, isTblLevel, numBitVector, fetch);
+    StatsTask columnStatsTask = map.get(tableName);
+    if (columnStatsTask == null) {
+      throw new SemanticException("Can not find " + tableName + " in genColumnStatsTask");
+    } else {
+      columnStatsTask.getWork().setColStats(cStatsDesc);
     }
   }
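
The net effect of the TaskCompiler rework: instead of a free-standing
ColumnStatsTask, column stats are attached to the per-table StatsTask collected in
the map. A minimal sketch of the attachment step at the end of genColumnStatsTask,
with tableName, colName, colType, isTblLevel, numBitVector, fetch and map assumed
as in the diff:

    ColumnStatsDesc cStatsDesc = new ColumnStatsDesc(tableName,
        colName, colType, isTblLevel, numBitVector, fetch);
    StatsTask statsTask = map.get(tableName);    // one StatsTask per target table
    if (statsTask == null) {
      throw new SemanticException("Cannot find " + tableName + " in genColumnStatsTask");
    }
    statsTask.getWork().setColStats(cStatsDesc); // column stats ride on the same task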
 

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/java/org/apache/hadoop/hive/ql/parse/spark/SparkProcessAnalyzeTable.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/spark/SparkProcessAnalyzeTable.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/spark/SparkProcessAnalyzeTable.java
index 6f21cae..36fe8a2 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/spark/SparkProcessAnalyzeTable.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/spark/SparkProcessAnalyzeTable.java
@@ -25,7 +25,6 @@ import java.util.Stack;
 import org.apache.hadoop.hive.ql.io.parquet.MapredParquetInputFormat;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
-import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.ql.exec.TableScanOperator;
 import org.apache.hadoop.hive.ql.exec.Task;
 import org.apache.hadoop.hive.ql.exec.TaskFactory;
@@ -40,10 +39,10 @@ import org.apache.hadoop.hive.ql.optimizer.GenMapRedUtils;
 import org.apache.hadoop.hive.ql.parse.ParseContext;
 import org.apache.hadoop.hive.ql.parse.PrunedPartitionList;
 import org.apache.hadoop.hive.ql.parse.SemanticException;
+import org.apache.hadoop.hive.ql.plan.StatsWork;
 import org.apache.hadoop.hive.ql.plan.MapWork;
 import org.apache.hadoop.hive.ql.plan.SparkWork;
-import org.apache.hadoop.hive.ql.plan.StatsNoJobWork;
-import org.apache.hadoop.hive.ql.plan.StatsWork;
+import org.apache.hadoop.hive.ql.plan.BasicStatsWork;
 import org.apache.hadoop.mapred.InputFormat;
 
 import com.google.common.base.Preconditions;
@@ -78,9 +77,9 @@ public class SparkProcessAnalyzeTable implements NodeProcessor {
 
     ParseContext parseContext = context.parseContext;
 
+    Table table = tableScan.getConf().getTableMetadata();
     @SuppressWarnings("rawtypes")
-    Class<? extends InputFormat> inputFormat = tableScan.getConf().getTableMetadata()
-        .getInputFormatClass();
+    Class<? extends InputFormat> inputFormat = table.getInputFormatClass();
 
     if (parseContext.getQueryProperties().isAnalyzeCommand()) {
       Preconditions.checkArgument(tableScan.getChildOperators() == null
@@ -103,19 +102,16 @@ public class SparkProcessAnalyzeTable implements NodeProcessor {
         // ANALYZE TABLE T [PARTITION (...)] COMPUTE STATISTICS
         // ANALYZE TABLE T [PARTITION (...)] COMPUTE STATISTICS noscan;
         // There will not be any Spark job above this task
-        StatsNoJobWork snjWork = new StatsNoJobWork(tableScan.getConf().getTableMetadata().getTableSpec());
-        snjWork.setStatsReliable(parseContext.getConf().getBoolVar(
-            HiveConf.ConfVars.HIVE_STATS_RELIABLE));
+        StatsWork statWork = new StatsWork(table, parseContext.getConf());
+        statWork.setFooterScan();
         // If partition is specified, get pruned partition list
         Set<Partition> confirmedParts = GenMapRedUtils.getConfirmedPartitionsForScan(tableScan);
         if (confirmedParts.size() > 0) {
-          Table source = tableScan.getConf().getTableMetadata();
           List<String> partCols = GenMapRedUtils.getPartitionColumns(tableScan);
-          PrunedPartitionList partList = new PrunedPartitionList(source, confirmedParts, partCols,
-              false);
-          snjWork.setPrunedPartitionList(partList);
+          PrunedPartitionList partList = new PrunedPartitionList(table, confirmedParts, partCols, false);
+          statWork.addInputPartitions(partList.getPartitions());
         }
-        Task<StatsNoJobWork> snjTask = TaskFactory.get(snjWork, parseContext.getConf());
+        Task<StatsWork> snjTask = TaskFactory.get(statWork, parseContext.getConf());
         snjTask.setParentTasks(null);
         context.rootTasks.remove(context.currentTask);
         context.rootTasks.add(snjTask);
@@ -126,19 +122,18 @@ public class SparkProcessAnalyzeTable implements NodeProcessor {
         // The plan consists of a simple SparkTask followed by a StatsTask.
         // The Spark task is just a simple TableScanOperator
 
-        StatsWork statsWork = new StatsWork(tableScan.getConf().getTableMetadata().getTableSpec());
-        statsWork.setAggKey(tableScan.getConf().getStatsAggPrefix());
-        statsWork.setStatsTmpDir(tableScan.getConf().getTmpStatsDir());
-        statsWork.setSourceTask(context.currentTask);
-        statsWork.setStatsReliable(parseContext.getConf().getBoolVar(HiveConf.ConfVars.HIVE_STATS_RELIABLE));
-        Task<StatsWork> statsTask = TaskFactory.get(statsWork, parseContext.getConf());
+        BasicStatsWork basicStatsWork = new BasicStatsWork(table.getTableSpec());
+        basicStatsWork.setNoScanAnalyzeCommand(parseContext.getQueryProperties().isNoScanAnalyzeCommand());
+        StatsWork columnStatsWork = new StatsWork(table, basicStatsWork, parseContext.getConf());
+        columnStatsWork.collectStatsFromAggregator(tableScan.getConf());
+        columnStatsWork.setSourceTask(context.currentTask);
+        Task<StatsWork> statsTask = TaskFactory.get(columnStatsWork, parseContext.getConf());
         context.currentTask.addDependentTask(statsTask);
 
         // ANALYZE TABLE T [PARTITION (...)] COMPUTE STATISTICS noscan;
         // The plan consists of a StatsTask only.
         if (parseContext.getQueryProperties().isNoScanAnalyzeCommand()) {
           statsTask.setParentTasks(null);
-          statsWork.setNoScanAnalyzeCommand(true);
           context.rootTasks.remove(context.currentTask);
           context.rootTasks.add(statsTask);
         }
@@ -148,9 +143,8 @@ public class SparkProcessAnalyzeTable implements NodeProcessor {
         Set<Partition> confirmedPartns = GenMapRedUtils.getConfirmedPartitionsForScan(tableScan);
         PrunedPartitionList partitions = null;
         if (confirmedPartns.size() > 0) {
-          Table source = tableScan.getConf().getTableMetadata();
           List<String> partCols = GenMapRedUtils.getPartitionColumns(tableScan);
-          partitions = new PrunedPartitionList(source, confirmedPartns, partCols, false);
+          partitions = new PrunedPartitionList(table, confirmedPartns, partCols, false);
         }
 
         MapWork w = utils.createMapWork(context, tableScan, sparkWork, partitions);

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/java/org/apache/hadoop/hive/ql/plan/BasicStatsNoJobWork.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/BasicStatsNoJobWork.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/BasicStatsNoJobWork.java
new file mode 100644
index 0000000..d4f6a41
--- /dev/null
+++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/BasicStatsNoJobWork.java
@@ -0,0 +1,62 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.ql.plan;
+
+import java.io.Serializable;
+import java.util.Set;
+
+import org.apache.hadoop.hive.ql.metadata.Partition;
+import org.apache.hadoop.hive.ql.parse.BaseSemanticAnalyzer.TableSpec;
+
+/**
+ * Client-side (no-job) basic stats gathering work.
+ */
+public class BasicStatsNoJobWork implements Serializable {
+  private static final long serialVersionUID = 1L;
+
+  private TableSpec tableSpecs;
+  private boolean statsReliable;
+
+  private Set<Partition> pp;
+
+  public BasicStatsNoJobWork(TableSpec tableSpecs) {
+    this.tableSpecs = tableSpecs;
+  }
+
+  public TableSpec getTableSpecs() {
+    return tableSpecs;
+  }
+
+  public void setStatsReliable(boolean s1) {
+    statsReliable = s1;
+  }
+
+  public boolean isStatsReliable() {
+    return statsReliable;
+  }
+
+  public Set<Partition> getPartitions() {
+    return pp;
+  }
+
+  public void setPartitions(Set<Partition> partitions) {
+    pp = partitions;
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/java/org/apache/hadoop/hive/ql/plan/BasicStatsWork.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/BasicStatsWork.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/BasicStatsWork.java
new file mode 100644
index 0000000..0621bd4
--- /dev/null
+++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/BasicStatsWork.java
@@ -0,0 +1,196 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.ql.plan;
+
+import java.io.Serializable;
+
+import org.apache.hadoop.hive.ql.exec.Task;
+import org.apache.hadoop.hive.ql.parse.BaseSemanticAnalyzer.TableSpec;
+import org.apache.hadoop.hive.ql.plan.Explain.Level;
+import org.apache.hadoop.hive.ql.plan.LoadTableDesc.LoadFileType;
+
+
+/**
+ * BasicStatsWork: describes the basic (table-level) statistics gathering work
+ * that the unified StatsTask executes.
+ */
+public class BasicStatsWork implements Serializable {
+  private static final long serialVersionUID = 1L;
+
+  private TableSpec tableSpecs;         // source table spec -- for TableScanOperator
+  private LoadTableDesc loadTableDesc;  // same as MoveWork.loadTableDesc -- for FileSinkOperator
+  private LoadFileDesc loadFileDesc;    // same as MoveWork.loadFileDesc -- for FileSinkOperator
+  private String aggKey;                // aggregation key prefix
+  private boolean statsReliable;        // are stats completely reliable
+
+  // If no stats aggregator is present, clear the current aggregator stats.
+  // For example, if a merge is being performed, stats already collected by the aggregator
+  // (numRows etc.) are still valid. However, if a load file is being performed, the old stats
+  // collected by the aggregator are no longer valid, so it is better to clear them than to
+  // leave stale, incorrect stats behind.
+  // Since HIVE-12661, we keep the old stats (even though they may be wrong) for CBO
+  // purposes, and use the COLUMN_STATS_ACCURATE flag to record whether the stats
+  // are accurate.
+
+  private boolean clearAggregatorStats = false;
+
+  private boolean noStatsAggregator = false;
+
+  private boolean isNoScanAnalyzeCommand = false;
+
+  // The sourceTask for a TableScan is not changed (currently), but that of a FileSink
+  // may be changed by various optimizers (auto.convert.join, for example),
+  // so this is set by DriverContext at runtime.
+  private transient Task sourceTask;
+
+  private boolean isFollowedByColStats = false;
+
+  // used by FS based stats collector
+  private String statsTmpDir;
+
+  public BasicStatsWork() {
+  }
+
+  public BasicStatsWork(TableSpec tableSpecs) {
+    this.tableSpecs = tableSpecs;
+  }
+
+  public BasicStatsWork(LoadTableDesc loadTableDesc) {
+    this.loadTableDesc = loadTableDesc;
+  }
+
+  public BasicStatsWork(LoadFileDesc loadFileDesc) {
+    this.loadFileDesc = loadFileDesc;
+  }
+
+  public TableSpec getTableSpecs() {
+    return tableSpecs;
+  }
+
+  public LoadTableDesc getLoadTableDesc() {
+    return loadTableDesc;
+  }
+
+  public LoadFileDesc getLoadFileDesc() {
+    return loadFileDesc;
+  }
+
+  public void setAggKey(String aggK) {
+    aggKey = aggK;
+  }
+
+  @Explain(displayName = "Stats Aggregation Key Prefix", explainLevels = { Level.EXTENDED })
+  public String getAggKey() {
+    return aggKey;
+  }
+
+  public String getStatsTmpDir() {
+    return statsTmpDir;
+  }
+
+  public void setStatsTmpDir(String statsTmpDir) {
+    this.statsTmpDir = statsTmpDir;
+  }
+
+  public boolean getNoStatsAggregator() {
+    return noStatsAggregator;
+  }
+
+  public void setNoStatsAggregator(boolean noStatsAggregator) {
+    this.noStatsAggregator = noStatsAggregator;
+  }
+
+  public boolean isStatsReliable() {
+    return statsReliable;
+  }
+
+  public void setStatsReliable(boolean statsReliable) {
+    this.statsReliable = statsReliable;
+  }
+
+  public boolean isClearAggregatorStats() {
+    return clearAggregatorStats;
+  }
+
+  public void setClearAggregatorStats(boolean clearAggregatorStats) {
+    this.clearAggregatorStats = clearAggregatorStats;
+  }
+
+  /**
+   * @return the isNoScanAnalyzeCommand
+   */
+  public boolean isNoScanAnalyzeCommand() {
+    return isNoScanAnalyzeCommand;
+  }
+
+  /**
+   * @param isNoScanAnalyzeCommand the isNoScanAnalyzeCommand to set
+   */
+  public void setNoScanAnalyzeCommand(boolean isNoScanAnalyzeCommand) {
+    this.isNoScanAnalyzeCommand = isNoScanAnalyzeCommand;
+  }
+
+  public Task getSourceTask() {
+    return sourceTask;
+  }
+
+  public void setSourceTask(Task sourceTask) {
+    this.sourceTask = sourceTask;
+  }
+
+  public boolean isFollowedByColStats1() {
+    return isFollowedByColStats;
+  }
+
+  public void setFollowedByColStats1(boolean isFollowedByColStats) {
+    this.isFollowedByColStats = isFollowedByColStats;
+  }
+
+  public boolean isExplicitAnalyze() {
+    // ANALYZE TABLE
+    return (getTableSpecs() != null);
+  }
+  public boolean isTargetRewritten() {
+    // ANALYZE TABLE
+    if (isExplicitAnalyze()) {
+      return true;
+    }
+    // INSERT OVERWRITE
+    if (getLoadTableDesc() != null && getLoadTableDesc().getLoadFileType() == LoadFileType.REPLACE_ALL) {
+      return true;
+    }
+    // CREATE TABLE ... AS
+    if (getLoadFileDesc() != null && getLoadFileDesc().getCtasCreateTableDesc() != null) {
+      return true;
+    }
+    return false;
+  }
+
+  public String getTableName() {
+    BasicStatsWork work = this;
+    if (work.getLoadTableDesc() != null) {
+      return work.getLoadTableDesc().getTable().getTableName();
+    } else if (work.getTableSpecs() != null) {
+      return work.getTableSpecs().tableName;
+    } else {
+      return getLoadFileDesc().getCtasCreateTableDesc().getTableName();
+    }
+  }
+
+}
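
As a reading aid for isTargetRewritten() above, a test-style sketch of its three
true branches; tableSpec, loadTableDesc and loadFileDesc are assumed to be built
elsewhere in the planner, and only members visible in the new class are used:

    // ANALYZE TABLE: an explicit analyze always rewrites the target's stats.
    assert new BasicStatsWork(tableSpec).isTargetRewritten();

    // INSERT OVERWRITE: rewrites only when the load replaces all existing data.
    boolean replaceAll = loadTableDesc.getLoadFileType() == LoadFileType.REPLACE_ALL;
    assert new BasicStatsWork(loadTableDesc).isTargetRewritten() == replaceAll;

    // CTAS: rewrites when the load file descriptor carries a CREATE TABLE ... AS desc.
    boolean isCtas = loadFileDesc.getCtasCreateTableDesc() != null;
    assert new BasicStatsWork(loadFileDesc).isTargetRewritten() == isCtas;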

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/java/org/apache/hadoop/hive/ql/plan/ColumnStatsDesc.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/ColumnStatsDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/ColumnStatsDesc.java
index 97f323f..1ca7ea6 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/plan/ColumnStatsDesc.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/ColumnStatsDesc.java
@@ -28,31 +28,25 @@ import org.apache.hadoop.hive.ql.plan.Explain.Level;
 public class ColumnStatsDesc extends DDLDesc implements Serializable, Cloneable {
 
   private static final long serialVersionUID = 1L;
+  private FetchWork fWork;
+
   private boolean isTblLevel;
   private int numBitVector;
+  private boolean needMerge;
   private String tableName;
   private List<String> colName;
   private List<String> colType;
 
-  public ColumnStatsDesc() {
-  }
 
-  public ColumnStatsDesc(String tableName, List<String> colName, List<String> colType,
-      boolean isTblLevel) {
-    this.tableName = tableName;
-    this.colName = colName;
-    this.colType = colType;
-    this.isTblLevel = isTblLevel;
-    this.numBitVector = 0;
-  }
-  
   public ColumnStatsDesc(String tableName, List<String> colName,
-    List<String> colType, boolean isTblLevel, int numBitVector) {
+      List<String> colType, boolean isTblLevel, int numBitVector, FetchWork fWork1) {
     this.tableName = tableName;
     this.colName = colName;
     this.colType = colType;
     this.isTblLevel = isTblLevel;
     this.numBitVector = numBitVector;
+    this.needMerge = this.numBitVector != 0;
+    this.fWork = fWork;
   }
 
   @Explain(displayName = "Table")
@@ -99,4 +93,13 @@ public class ColumnStatsDesc extends DDLDesc implements Serializable, Cloneable
     this.numBitVector = numBitVector;
   }
 
+  public boolean isNeedMerge() {
+    return needMerge;
+  }
+
+
+  public FetchWork getFWork() {
+    return fWork;
+  }
+
 }
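
ColumnStatsDesc now carries the FetchWork that feeds the column stats computation
and derives needMerge from numBitVector, which is non-zero only on the INSERT
auto-gather path where partial NDV bit vectors must be merged. A hedged
construction example; the table and column names are borrowed from the test output
later in this patch, and fetchWork is assumed to exist:

    ColumnStatsDesc desc = new ColumnStatsDesc("default.src_multi1",
        Arrays.asList("key", "value"),       // column names
        Arrays.asList("string", "string"),   // column types
        true,                                // isTblLevel
        0,                                   // numBitVector: plain ANALYZE ... FOR COLUMNS
        fetchWork);
    assert !desc.isNeedMerge();              // merge is needed only when numBitVector != 0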

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/java/org/apache/hadoop/hive/ql/plan/ColumnStatsWork.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/ColumnStatsWork.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/ColumnStatsWork.java
deleted file mode 100644
index 842fd1a..0000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/plan/ColumnStatsWork.java
+++ /dev/null
@@ -1,97 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.ql.plan;
-
-import java.io.Serializable;
-
-import org.apache.hadoop.hive.ql.CompilationOpContext;
-import org.apache.hadoop.hive.ql.exec.ListSinkOperator;
-import org.apache.hadoop.hive.ql.plan.Explain.Level;
-
-
-/**
- * ColumnStats Work.
- *
- */
-@Explain(displayName = "Column Stats Work", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED })
-public class ColumnStatsWork implements Serializable {
-  private static final long serialVersionUID = 1L;
-  private FetchWork fWork;
-  private ColumnStatsDesc colStats;
-  private String currentDatabaseName;
-  private static final int LIMIT = -1;
-
-
-  public ColumnStatsWork() {
-  }
-
-  public ColumnStatsWork(FetchWork work, ColumnStatsDesc colStats, String currentDatabaseName) {
-    this.fWork = work;
-    this.setColStats(colStats);
-    this.currentDatabaseName = currentDatabaseName;
-  }
-
-  @Override
-  public String toString() {
-    String ret;
-    ret = fWork.toString();
-    return ret;
-  }
-
-  public FetchWork getfWork() {
-    return fWork;
-  }
-
-  public void setfWork(FetchWork fWork) {
-    this.fWork = fWork;
-  }
-
-  @Explain(displayName = "Column Stats Desc")
-  public ColumnStatsDesc getColStats() {
-    return colStats;
-  }
-
-  public void setColStats(ColumnStatsDesc colStats) {
-    this.colStats = colStats;
-  }
-
-  public ListSinkOperator getSink() {
-    return fWork.getSink();
-  }
-
-  public void initializeForFetch(CompilationOpContext ctx) {
-    fWork.initializeForFetch(ctx);
-  }
-
-  public int getLeastNumRows() {
-    return fWork.getLeastNumRows();
-  }
-
-  public static int getLimit() {
-    return LIMIT;
-  }
-
-  public String getCurrentDatabaseName() {
-    return currentDatabaseName;
-  }
-
-  public void setCurrentDatabaseName(String currentDatabaseName) {
-    this.currentDatabaseName = currentDatabaseName;
-  }
-}

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/java/org/apache/hadoop/hive/ql/plan/FileSinkDesc.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/FileSinkDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/FileSinkDesc.java
index 550e6f8..92c9768 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/plan/FileSinkDesc.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/FileSinkDesc.java
@@ -34,7 +34,7 @@ import org.apache.hadoop.hive.ql.plan.Explain.Vectorization;
  *
  */
 @Explain(displayName = "File Output Operator", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED })
-public class FileSinkDesc extends AbstractOperatorDesc {
+public class FileSinkDesc extends AbstractOperatorDesc implements IStatsGatherDesc {
   private static final long serialVersionUID = 1L;
 
   public enum DPSortState {
@@ -373,6 +373,7 @@ public class FileSinkDesc extends AbstractOperatorDesc {
     this.gatherStats = gatherStats;
   }
 
+  @Override
   @Explain(displayName = "GatherStats", explainLevels = { Level.EXTENDED })
   public boolean isGatherStats() {
     return gatherStats;
@@ -389,6 +390,7 @@ public class FileSinkDesc extends AbstractOperatorDesc {
    * will be aggregated.
    * @return key prefix used for stats publishing and aggregation.
    */
+  @Override
   @Explain(displayName = "Stats Publishing Key Prefix", explainLevels = { Level.EXTENDED })
   public String getStatsAggPrefix() {
     // dirName uniquely identifies destination directory of a FileSinkOperator.
@@ -511,7 +513,8 @@ public class FileSinkDesc extends AbstractOperatorDesc {
   }
 
 
-  public String getStatsTmpDir() {
+  @Override
+  public String getTmpStatsDir() {
     return statsTmpDir;
   }
 
@@ -577,4 +580,5 @@ public class FileSinkDesc extends AbstractOperatorDesc {
     }
     return false;
   }
+
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/java/org/apache/hadoop/hive/ql/plan/IStatsGatherDesc.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/IStatsGatherDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/IStatsGatherDesc.java
new file mode 100644
index 0000000..a83c4fb
--- /dev/null
+++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/IStatsGatherDesc.java
@@ -0,0 +1,28 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.ql.plan;
+
+public interface IStatsGatherDesc {
+  boolean isGatherStats();
+
+  String getTmpStatsDir();
+
+  String getStatsAggPrefix();
+
+}
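
This small interface is what lets the merged StatsTask treat its stats producers
uniformly: FileSinkDesc implements it in the hunk above, and TableScanDesc already
exposes the same accessors (that it implements this interface elsewhere in the
patch is an assumption here). A sketch of a consumer written against the
interface:

    // Hypothetical helper: works for any stats-gathering operator descriptor.
    static void logStatsSource(IStatsGatherDesc desc) {
      if (desc.isGatherStats()) {
        System.out.println("stats prefix = " + desc.getStatsAggPrefix()
            + ", tmp dir = " + desc.getTmpStatsDir());
      }
    }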

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/java/org/apache/hadoop/hive/ql/plan/LoadFileDesc.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/LoadFileDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/LoadFileDesc.java
index 30d9912..c09589c 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/plan/LoadFileDesc.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/LoadFileDesc.java
@@ -35,7 +35,7 @@ public class LoadFileDesc extends LoadDesc implements Serializable {
   // list of columns, comma separated
   private String columns;
   private String columnTypes;
-  private String destinationCreateTable;
+  private transient CreateTableDesc ctasCreateTableDesc;
   private boolean isMmCtas;
 
   public LoadFileDesc(final LoadFileDesc o) {
@@ -45,23 +45,16 @@ public class LoadFileDesc extends LoadDesc implements Serializable {
     this.isDfsDir = o.isDfsDir;
     this.columns = o.columns;
     this.columnTypes = o.columnTypes;
-    this.destinationCreateTable = o.destinationCreateTable;
     this.isMmCtas = o.isMmCtas;
+    this.ctasCreateTableDesc = o.ctasCreateTableDesc;
   }
 
   public LoadFileDesc(final CreateTableDesc createTableDesc, final CreateViewDesc  createViewDesc,
                       final Path sourcePath, final Path targetDir, final boolean isDfsDir,
-                      final String columns, final String columnTypes, AcidUtils.Operation writeType, boolean isMmCtas) {
+      final String columns, final String columnTypes, AcidUtils.Operation writeType, boolean isMmCtas) {
     this(sourcePath, targetDir, isDfsDir, columns, columnTypes, writeType, isMmCtas);
-    if (createTableDesc != null && createTableDesc.getDatabaseName() != null
-        && createTableDesc.getTableName() != null) {
-      destinationCreateTable = (createTableDesc.getTableName().contains(".") ? "" : createTableDesc
-          .getDatabaseName() + ".")
-          + createTableDesc.getTableName();
-    } else if (createViewDesc != null) {
-      // The work is already done in analyzeCreateView to assure that the view name is fully
-      // qualified.
-      destinationCreateTable = createViewDesc.getViewName();
+    if (createTableDesc != null && createTableDesc.isCTAS()) {
+      ctasCreateTableDesc = createTableDesc;
     }
   }
 
@@ -131,11 +124,8 @@ public class LoadFileDesc extends LoadDesc implements Serializable {
     this.columnTypes = columnTypes;
   }
 
-  /**
-   * @return the destinationCreateTable
-   */
-  public String getDestinationCreateTable(){
-    return destinationCreateTable;
+  public CreateTableDesc getCtasCreateTableDesc() {
+    return ctasCreateTableDesc;
   }
 
   public boolean isMmCtas() {

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/java/org/apache/hadoop/hive/ql/plan/StatsNoJobWork.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/StatsNoJobWork.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/StatsNoJobWork.java
deleted file mode 100644
index 77c04f6..0000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/plan/StatsNoJobWork.java
+++ /dev/null
@@ -1,70 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.ql.plan;
-
-import java.io.Serializable;
-
-import org.apache.hadoop.hive.ql.parse.BaseSemanticAnalyzer.TableSpec;
-import org.apache.hadoop.hive.ql.parse.PrunedPartitionList;
-import org.apache.hadoop.hive.ql.plan.Explain.Level;
-
-
-
-/**
- * Client-side stats aggregator task.
- */
-@Explain(displayName = "Stats-Aggr Operator", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED })
-public class StatsNoJobWork implements Serializable {
-  private static final long serialVersionUID = 1L;
-
-  private TableSpec tableSpecs;
-  private boolean statsReliable;
-  private PrunedPartitionList prunedPartitionList;
-
-  public StatsNoJobWork() {
-  }
-
-  public StatsNoJobWork(TableSpec tableSpecs) {
-    this.tableSpecs = tableSpecs;
-  }
-
-  public StatsNoJobWork(boolean statsReliable) {
-    this.statsReliable = statsReliable;
-  }
-
-  public TableSpec getTableSpecs() {
-    return tableSpecs;
-  }
-
-  public boolean isStatsReliable() {
-    return statsReliable;
-  }
-
-  public void setStatsReliable(boolean statsReliable) {
-    this.statsReliable = statsReliable;
-  }
-
-  public void setPrunedPartitionList(PrunedPartitionList prunedPartitionList) {
-    this.prunedPartitionList = prunedPartitionList;
-  }
-
-  public PrunedPartitionList getPrunedPartitionList() {
-    return prunedPartitionList;
-  }
-}
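
The state this deleted descriptor carried does not disappear; the point of the merge
is that one work descriptor now drives both the basic-stats and the column-stats
paths. A minimal sketch of the shape such a merged descriptor needs, assuming it
keeps StatsNoJobWork's fields plus a column-stats request; the class name is
illustrative only (see the work-descriptor changes elsewhere in this patch for the
real implementation):

    import java.io.Serializable;

    import org.apache.hadoop.hive.ql.parse.BaseSemanticAnalyzer.TableSpec;
    import org.apache.hadoop.hive.ql.parse.PrunedPartitionList;

    // Illustrative sketch, not the patch itself: a single serializable work
    // descriptor covering the old no-job (footer scan) path and column stats.
    public class MergedStatsWorkSketch implements Serializable {
      private static final long serialVersionUID = 1L;

      private TableSpec tableSpecs;                    // what to scan
      private PrunedPartitionList prunedPartitionList; // optional pruning result
      private boolean statsReliable;
      private boolean computeColumnStats;              // folded-in column-stats request

      public MergedStatsWorkSketch(TableSpec tableSpecs, boolean computeColumnStats) {
        this.tableSpecs = tableSpecs;
        this.computeColumnStats = computeColumnStats;
      }

      public TableSpec getTableSpecs() { return tableSpecs; }
      public boolean isStatsReliable() { return statsReliable; }
      public void setStatsReliable(boolean b) { this.statsReliable = b; }
      public PrunedPartitionList getPrunedPartitionList() { return prunedPartitionList; }
      public void setPrunedPartitionList(PrunedPartitionList l) { this.prunedPartitionList = l; }
      public boolean isComputeColumnStats() { return computeColumnStats; }
    }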


[17/22] hive git commit: HIVE-16827 : Merge stats task and column stats task into a single task (Zoltan Haindrich via Ashutosh Chauhan)

Posted by ha...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/autoColumnStats_1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/autoColumnStats_1.q.out b/ql/src/test/results/clientpositive/autoColumnStats_1.q.out
new file mode 100644
index 0000000..e04eaf4
--- /dev/null
+++ b/ql/src/test/results/clientpositive/autoColumnStats_1.q.out
@@ -0,0 +1,1433 @@
+PREHOOK: query: drop table src_multi1
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: drop table src_multi1
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: create table src_multi1 like src
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@src_multi1
+POSTHOOK: query: create table src_multi1 like src
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@src_multi1
+PREHOOK: query: insert overwrite table src_multi1 select * from src
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@src_multi1
+POSTHOOK: query: insert overwrite table src_multi1 select * from src
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@src_multi1
+POSTHOOK: Lineage: src_multi1.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: src_multi1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: explain extended select * from src_multi1
+PREHOOK: type: QUERY
+POSTHOOK: query: explain extended select * from src_multi1
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        TableScan
+          alias: src_multi1
+          Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+          GatherStats: false
+          Select Operator
+            expressions: key (type: string), value (type: string)
+            outputColumnNames: _col0, _col1
+            Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+            ListSink
+
+PREHOOK: query: describe formatted src_multi1
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@src_multi1
+POSTHOOK: query: describe formatted src_multi1
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@src_multi1
+# col_name            	data_type           	comment             
+key                 	string              	default             
+value               	string              	default             
+	 	 
+# Detailed Table Information	 	 
+Database:           	default             	 
+#### A masked pattern was here ####
+Retention:          	0                   	 
+#### A masked pattern was here ####
+Table Type:         	MANAGED_TABLE       	 
+Table Parameters:	 	 
+	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}}
+	numFiles            	1                   
+	numRows             	500                 
+	rawDataSize         	5312                
+	totalSize           	5812                
+#### A masked pattern was here ####
+	 	 
+# Storage Information	 	 
+SerDe Library:      	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe	 
+InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
+OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
+Compressed:         	No                  	 
+Num Buckets:        	-1                  	 
+Bucket Columns:     	[]                  	 
+Sort Columns:       	[]                  	 
+Storage Desc Params:	 	 
+	serialization.format	1                   
+PREHOOK: query: drop table a
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: drop table a
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: drop table b
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: drop table b
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: create table a like src
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@a
+POSTHOOK: query: create table a like src
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@a
+PREHOOK: query: create table b like src
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@b
+POSTHOOK: query: create table b like src
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@b
+PREHOOK: query: from src
+insert overwrite table a select *
+insert overwrite table b select *
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@a
+PREHOOK: Output: default@b
+POSTHOOK: query: from src
+insert overwrite table a select *
+insert overwrite table b select *
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@a
+POSTHOOK: Output: default@b
+POSTHOOK: Lineage: a.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: a.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: b.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: b.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: describe formatted a
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@a
+POSTHOOK: query: describe formatted a
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@a
+# col_name            	data_type           	comment             
+key                 	string              	default             
+value               	string              	default             
+	 	 
+# Detailed Table Information	 	 
+Database:           	default             	 
+#### A masked pattern was here ####
+Retention:          	0                   	 
+#### A masked pattern was here ####
+Table Type:         	MANAGED_TABLE       	 
+Table Parameters:	 	 
+	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}}
+	numFiles            	1                   
+	numRows             	500                 
+	rawDataSize         	5312                
+	totalSize           	5812                
+#### A masked pattern was here ####
+	 	 
+# Storage Information	 	 
+SerDe Library:      	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe	 
+InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
+OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
+Compressed:         	No                  	 
+Num Buckets:        	-1                  	 
+Bucket Columns:     	[]                  	 
+Sort Columns:       	[]                  	 
+Storage Desc Params:	 	 
+	serialization.format	1                   
+PREHOOK: query: describe formatted b
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@b
+POSTHOOK: query: describe formatted b
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@b
+# col_name            	data_type           	comment             
+key                 	string              	default             
+value               	string              	default             
+	 	 
+# Detailed Table Information	 	 
+Database:           	default             	 
+#### A masked pattern was here ####
+Retention:          	0                   	 
+#### A masked pattern was here ####
+Table Type:         	MANAGED_TABLE       	 
+Table Parameters:	 	 
+	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}}
+	numFiles            	1                   
+	numRows             	500                 
+	rawDataSize         	5312                
+	totalSize           	5812                
+#### A masked pattern was here ####
+	 	 
+# Storage Information	 	 
+SerDe Library:      	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe	 
+InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
+OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
+Compressed:         	No                  	 
+Num Buckets:        	-1                  	 
+Bucket Columns:     	[]                  	 
+Sort Columns:       	[]                  	 
+Storage Desc Params:	 	 
+	serialization.format	1                   
+PREHOOK: query: drop table a
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@a
+PREHOOK: Output: default@a
+POSTHOOK: query: drop table a
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@a
+POSTHOOK: Output: default@a
+PREHOOK: query: drop table b
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@b
+PREHOOK: Output: default@b
+POSTHOOK: query: drop table b
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@b
+POSTHOOK: Output: default@b
+PREHOOK: query: create table a like src
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@a
+POSTHOOK: query: create table a like src
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@a
+PREHOOK: query: create table b like src
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@b
+POSTHOOK: query: create table b like src
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@b
+PREHOOK: query: from src
+insert overwrite table a select *
+insert into table b select *
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@a
+PREHOOK: Output: default@b
+POSTHOOK: query: from src
+insert overwrite table a select *
+insert into table b select *
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@a
+POSTHOOK: Output: default@b
+POSTHOOK: Lineage: a.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: a.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: b.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: b.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: describe formatted a
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@a
+POSTHOOK: query: describe formatted a
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@a
+# col_name            	data_type           	comment             
+key                 	string              	default             
+value               	string              	default             
+	 	 
+# Detailed Table Information	 	 
+Database:           	default             	 
+#### A masked pattern was here ####
+Retention:          	0                   	 
+#### A masked pattern was here ####
+Table Type:         	MANAGED_TABLE       	 
+Table Parameters:	 	 
+	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}}
+	numFiles            	1                   
+	numRows             	500                 
+	rawDataSize         	5312                
+	totalSize           	5812                
+#### A masked pattern was here ####
+	 	 
+# Storage Information	 	 
+SerDe Library:      	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe	 
+InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
+OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
+Compressed:         	No                  	 
+Num Buckets:        	-1                  	 
+Bucket Columns:     	[]                  	 
+Sort Columns:       	[]                  	 
+Storage Desc Params:	 	 
+	serialization.format	1                   
+PREHOOK: query: describe formatted b
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@b
+POSTHOOK: query: describe formatted b
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@b
+# col_name            	data_type           	comment             
+key                 	string              	default             
+value               	string              	default             
+	 	 
+# Detailed Table Information	 	 
+Database:           	default             	 
+#### A masked pattern was here ####
+Retention:          	0                   	 
+#### A masked pattern was here ####
+Table Type:         	MANAGED_TABLE       	 
+Table Parameters:	 	 
+	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}}
+	numFiles            	1                   
+	numRows             	500                 
+	rawDataSize         	5312                
+	totalSize           	5812                
+#### A masked pattern was here ####
+	 	 
+# Storage Information	 	 
+SerDe Library:      	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe	 
+InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
+OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
+Compressed:         	No                  	 
+Num Buckets:        	-1                  	 
+Bucket Columns:     	[]                  	 
+Sort Columns:       	[]                  	 
+Storage Desc Params:	 	 
+	serialization.format	1                   
+PREHOOK: query: drop table src_multi2
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: drop table src_multi2
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: create table src_multi2 like src
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@src_multi2
+POSTHOOK: query: create table src_multi2 like src
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@src_multi2
+PREHOOK: query: insert overwrite table src_multi2 select subq.key, src.value from (select * from src union select * from src1)subq join src on subq.key=src.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Input: default@src1
+PREHOOK: Output: default@src_multi2
+POSTHOOK: query: insert overwrite table src_multi2 select subq.key, src.value from (select * from src union select * from src1)subq join src on subq.key=src.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Input: default@src1
+POSTHOOK: Output: default@src_multi2
+POSTHOOK: Lineage: src_multi2.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), (src1)src1.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: src_multi2.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: describe formatted src_multi2
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@src_multi2
+POSTHOOK: query: describe formatted src_multi2
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@src_multi2
+# col_name            	data_type           	comment             
+key                 	string              	default             
+value               	string              	default             
+	 	 
+# Detailed Table Information	 	 
+Database:           	default             	 
+#### A masked pattern was here ####
+Retention:          	0                   	 
+#### A masked pattern was here ####
+Table Type:         	MANAGED_TABLE       	 
+Table Parameters:	 	 
+	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}}
+	numFiles            	1                   
+	numRows             	508                 
+	rawDataSize         	5400                
+	totalSize           	5908                
+#### A masked pattern was here ####
+	 	 
+# Storage Information	 	 
+SerDe Library:      	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe	 
+InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
+OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
+Compressed:         	No                  	 
+Num Buckets:        	-1                  	 
+Bucket Columns:     	[]                  	 
+Sort Columns:       	[]                  	 
+Storage Desc Params:	 	 
+	serialization.format	1                   
+PREHOOK: query: drop table nzhang_part14
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: drop table nzhang_part14
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: create table if not exists nzhang_part14 (key string)
+  partitioned by (value string)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@nzhang_part14
+POSTHOOK: query: create table if not exists nzhang_part14 (key string)
+  partitioned by (value string)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@nzhang_part14
+PREHOOK: query: desc formatted nzhang_part14
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@nzhang_part14
+POSTHOOK: query: desc formatted nzhang_part14
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@nzhang_part14
+# col_name            	data_type           	comment             
+key                 	string              	                    
+	 	 
+# Partition Information	 	 
+# col_name            	data_type           	comment             
+value               	string              	                    
+	 	 
+# Detailed Table Information	 	 
+Database:           	default             	 
+#### A masked pattern was here ####
+Retention:          	0                   	 
+#### A masked pattern was here ####
+Table Type:         	MANAGED_TABLE       	 
+Table Parameters:	 	 
+	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\"}
+	numFiles            	0                   
+	numPartitions       	0                   
+	numRows             	0                   
+	rawDataSize         	0                   
+	totalSize           	0                   
+#### A masked pattern was here ####
+	 	 
+# Storage Information	 	 
+SerDe Library:      	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe	 
+InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
+OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
+Compressed:         	No                  	 
+Num Buckets:        	-1                  	 
+Bucket Columns:     	[]                  	 
+Sort Columns:       	[]                  	 
+Storage Desc Params:	 	 
+	serialization.format	1                   
+PREHOOK: query: insert overwrite table nzhang_part14 partition(value) 
+select key, value from (
+  select * from (select 'k1' as key, cast(null as string) as value from src limit 2)a 
+  union all
+  select * from (select 'k2' as key, '' as value from src limit 2)b
+  union all 
+  select * from (select 'k3' as key, ' ' as value from src limit 2)c
+) T
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@nzhang_part14
+POSTHOOK: query: insert overwrite table nzhang_part14 partition(value) 
+select key, value from (
+  select * from (select 'k1' as key, cast(null as string) as value from src limit 2)a 
+  union all
+  select * from (select 'k2' as key, '' as value from src limit 2)b
+  union all 
+  select * from (select 'k3' as key, ' ' as value from src limit 2)c
+) T
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@nzhang_part14@value= 
+POSTHOOK: Output: default@nzhang_part14@value=__HIVE_DEFAULT_PARTITION__
+POSTHOOK: Lineage: nzhang_part14 PARTITION(value= ).key EXPRESSION []
+POSTHOOK: Lineage: nzhang_part14 PARTITION(value=__HIVE_DEFAULT_PARTITION__).key EXPRESSION []
+PREHOOK: query: desc formatted nzhang_part14 partition (value=' ')
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@nzhang_part14
+POSTHOOK: query: desc formatted nzhang_part14 partition (value=' ')
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@nzhang_part14
+# col_name            	data_type           	comment             
+key                 	string              	                    
+	 	 
+# Partition Information	 	 
+# col_name            	data_type           	comment             
+value               	string              	                    
+	 	 
+# Detailed Partition Information	 	 
+Partition Value:    	[ ]                 	 
+Database:           	default             	 
+Table:              	nzhang_part14       	 
+#### A masked pattern was here ####
+Partition Parameters:	 	 
+	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\"}}
+	numFiles            	1                   
+	numRows             	2                   
+	rawDataSize         	4                   
+	totalSize           	6                   
+#### A masked pattern was here ####
+	 	 
+# Storage Information	 	 
+SerDe Library:      	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe	 
+InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
+OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
+Compressed:         	No                  	 
+Num Buckets:        	-1                  	 
+Bucket Columns:     	[]                  	 
+Sort Columns:       	[]                  	 
+Storage Desc Params:	 	 
+	serialization.format	1                   
+PREHOOK: query: explain select key from nzhang_part14
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select key from nzhang_part14
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        TableScan
+          alias: nzhang_part14
+          Statistics: Num rows: 6 Data size: 516 Basic stats: COMPLETE Column stats: COMPLETE
+          Select Operator
+            expressions: key (type: string)
+            outputColumnNames: _col0
+            Statistics: Num rows: 6 Data size: 516 Basic stats: COMPLETE Column stats: COMPLETE
+            ListSink
+
+PREHOOK: query: drop table src5
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: drop table src5
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: create table src5 as select key, value from src limit 5
+PREHOOK: type: CREATETABLE_AS_SELECT
+PREHOOK: Input: default@src
+PREHOOK: Output: database:default
+PREHOOK: Output: default@src5
+POSTHOOK: query: create table src5 as select key, value from src limit 5
+POSTHOOK: type: CREATETABLE_AS_SELECT
+POSTHOOK: Input: default@src
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@src5
+POSTHOOK: Lineage: src5.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: src5.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: insert overwrite table nzhang_part14 partition(value)
+select key, value from src5
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src5
+PREHOOK: Output: default@nzhang_part14
+POSTHOOK: query: insert overwrite table nzhang_part14 partition(value)
+select key, value from src5
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src5
+POSTHOOK: Output: default@nzhang_part14@value=val_165
+POSTHOOK: Output: default@nzhang_part14@value=val_238
+POSTHOOK: Output: default@nzhang_part14@value=val_27
+POSTHOOK: Output: default@nzhang_part14@value=val_311
+POSTHOOK: Output: default@nzhang_part14@value=val_86
+POSTHOOK: Lineage: nzhang_part14 PARTITION(value=val_165).key SIMPLE [(src5)src5.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: nzhang_part14 PARTITION(value=val_238).key SIMPLE [(src5)src5.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: nzhang_part14 PARTITION(value=val_27).key SIMPLE [(src5)src5.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: nzhang_part14 PARTITION(value=val_311).key SIMPLE [(src5)src5.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: nzhang_part14 PARTITION(value=val_86).key SIMPLE [(src5)src5.FieldSchema(name:key, type:string, comment:null), ]
+PREHOOK: query: explain select key from nzhang_part14
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select key from nzhang_part14
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        TableScan
+          alias: nzhang_part14
+          Statistics: Num rows: 11 Data size: 946 Basic stats: COMPLETE Column stats: PARTIAL
+          Select Operator
+            expressions: key (type: string)
+            outputColumnNames: _col0
+            Statistics: Num rows: 11 Data size: 946 Basic stats: COMPLETE Column stats: PARTIAL
+            ListSink
+
+PREHOOK: query: create table alter5 ( col1 string ) partitioned by (dt string)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@alter5
+POSTHOOK: query: create table alter5 ( col1 string ) partitioned by (dt string)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@alter5
+PREHOOK: query: alter table alter5 add partition (dt='a') location 'parta'
+PREHOOK: type: ALTERTABLE_ADDPARTS
+#### A masked pattern was here ####
+PREHOOK: Output: default@alter5
+POSTHOOK: query: alter table alter5 add partition (dt='a') location 'parta'
+POSTHOOK: type: ALTERTABLE_ADDPARTS
+#### A masked pattern was here ####
+POSTHOOK: Output: default@alter5
+POSTHOOK: Output: default@alter5@dt=a
+PREHOOK: query: describe formatted alter5 partition (dt='a')
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@alter5
+POSTHOOK: query: describe formatted alter5 partition (dt='a')
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@alter5
+# col_name            	data_type           	comment             
+col1                	string              	                    
+	 	 
+# Partition Information	 	 
+# col_name            	data_type           	comment             
+dt                  	string              	                    
+	 	 
+# Detailed Partition Information	 	 
+Partition Value:    	[a]                 	 
+Database:           	default             	 
+Table:              	alter5              	 
+#### A masked pattern was here ####
+Partition Parameters:	 	 
+#### A masked pattern was here ####
+	 	 
+# Storage Information	 	 
+SerDe Library:      	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe	 
+InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
+OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
+Compressed:         	No                  	 
+Num Buckets:        	-1                  	 
+Bucket Columns:     	[]                  	 
+Sort Columns:       	[]                  	 
+Storage Desc Params:	 	 
+	serialization.format	1                   
+PREHOOK: query: insert overwrite table alter5 partition (dt='a') select key from src
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@alter5@dt=a
+POSTHOOK: query: insert overwrite table alter5 partition (dt='a') select key from src
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@alter5@dt=a
+POSTHOOK: Lineage: alter5 PARTITION(dt=a).col1 SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+PREHOOK: query: describe formatted alter5 partition (dt='a')
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@alter5
+POSTHOOK: query: describe formatted alter5 partition (dt='a')
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@alter5
+# col_name            	data_type           	comment             
+col1                	string              	                    
+	 	 
+# Partition Information	 	 
+# col_name            	data_type           	comment             
+dt                  	string              	                    
+	 	 
+# Detailed Partition Information	 	 
+Partition Value:    	[a]                 	 
+Database:           	default             	 
+Table:              	alter5              	 
+#### A masked pattern was here ####
+Partition Parameters:	 	 
+	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"col1\":\"true\"}}
+	numFiles            	1                   
+	numRows             	500                 
+	rawDataSize         	1406                
+	totalSize           	1906                
+#### A masked pattern was here ####
+	 	 
+# Storage Information	 	 
+SerDe Library:      	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe	 
+InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
+OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
+Compressed:         	No                  	 
+Num Buckets:        	-1                  	 
+Bucket Columns:     	[]                  	 
+Sort Columns:       	[]                  	 
+Storage Desc Params:	 	 
+	serialization.format	1                   
+PREHOOK: query: explain select * from alter5 where dt='a'
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select * from alter5 where dt='a'
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        TableScan
+          alias: alter5
+          Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
+          Select Operator
+            expressions: col1 (type: string), 'a' (type: string)
+            outputColumnNames: _col0, _col1
+            Statistics: Num rows: 500 Data size: 86000 Basic stats: COMPLETE Column stats: COMPLETE
+            ListSink
+
+PREHOOK: query: drop table src_stat_part
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: drop table src_stat_part
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: create table src_stat_part(key string, value string) partitioned by (partitionId int)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@src_stat_part
+POSTHOOK: query: create table src_stat_part(key string, value string) partitioned by (partitionId int)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@src_stat_part
+PREHOOK: query: insert overwrite table src_stat_part partition (partitionId=1)
+select * from src1 limit 5
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src1
+PREHOOK: Output: default@src_stat_part@partitionid=1
+POSTHOOK: query: insert overwrite table src_stat_part partition (partitionId=1)
+select * from src1 limit 5
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src1
+POSTHOOK: Output: default@src_stat_part@partitionid=1
+POSTHOOK: Lineage: src_stat_part PARTITION(partitionid=1).key SIMPLE [(src1)src1.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: src_stat_part PARTITION(partitionid=1).value SIMPLE [(src1)src1.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: describe formatted src_stat_part PARTITION(partitionId=1)
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@src_stat_part
+POSTHOOK: query: describe formatted src_stat_part PARTITION(partitionId=1)
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@src_stat_part
+# col_name            	data_type           	comment             
+key                 	string              	                    
+value               	string              	                    
+	 	 
+# Partition Information	 	 
+# col_name            	data_type           	comment             
+partitionid         	int                 	                    
+	 	 
+# Detailed Partition Information	 	 
+Partition Value:    	[1]                 	 
+Database:           	default             	 
+Table:              	src_stat_part       	 
+#### A masked pattern was here ####
+Partition Parameters:	 	 
+	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}}
+	numFiles            	1                   
+	numRows             	5                   
+	rawDataSize         	38                  
+	totalSize           	43                  
+#### A masked pattern was here ####
+	 	 
+# Storage Information	 	 
+SerDe Library:      	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe	 
+InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
+OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
+Compressed:         	No                  	 
+Num Buckets:        	-1                  	 
+Bucket Columns:     	[]                  	 
+Sort Columns:       	[]                  	 
+Storage Desc Params:	 	 
+	serialization.format	1                   
+PREHOOK: query: insert overwrite table src_stat_part partition (partitionId=2)
+select * from src1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src1
+PREHOOK: Output: default@src_stat_part@partitionid=2
+POSTHOOK: query: insert overwrite table src_stat_part partition (partitionId=2)
+select * from src1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src1
+POSTHOOK: Output: default@src_stat_part@partitionid=2
+POSTHOOK: Lineage: src_stat_part PARTITION(partitionid=2).key SIMPLE [(src1)src1.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: src_stat_part PARTITION(partitionid=2).value SIMPLE [(src1)src1.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: describe formatted src_stat_part PARTITION(partitionId=2)
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@src_stat_part
+POSTHOOK: query: describe formatted src_stat_part PARTITION(partitionId=2)
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@src_stat_part
+# col_name            	data_type           	comment             
+key                 	string              	                    
+value               	string              	                    
+	 	 
+# Partition Information	 	 
+# col_name            	data_type           	comment             
+partitionid         	int                 	                    
+	 	 
+# Detailed Partition Information	 	 
+Partition Value:    	[2]                 	 
+Database:           	default             	 
+Table:              	src_stat_part       	 
+#### A masked pattern was here ####
+Partition Parameters:	 	 
+	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}}
+	numFiles            	1                   
+	numRows             	25                  
+	rawDataSize         	191                 
+	totalSize           	216                 
+#### A masked pattern was here ####
+	 	 
+# Storage Information	 	 
+SerDe Library:      	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe	 
+InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
+OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
+Compressed:         	No                  	 
+Num Buckets:        	-1                  	 
+Bucket Columns:     	[]                  	 
+Sort Columns:       	[]                  	 
+Storage Desc Params:	 	 
+	serialization.format	1                   
+PREHOOK: query: drop table srcbucket_mapjoin
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: drop table srcbucket_mapjoin
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: CREATE TABLE srcbucket_mapjoin(key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@srcbucket_mapjoin
+POSTHOOK: query: CREATE TABLE srcbucket_mapjoin(key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@srcbucket_mapjoin
+PREHOOK: query: drop table tab_part
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: drop table tab_part
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: CREATE TABLE tab_part (key int, value string) PARTITIONED BY(ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@tab_part
+POSTHOOK: query: CREATE TABLE tab_part (key int, value string) PARTITIONED BY(ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@tab_part
+PREHOOK: query: drop table srcbucket_mapjoin_part
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: drop table srcbucket_mapjoin_part
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: CREATE TABLE srcbucket_mapjoin_part (key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@srcbucket_mapjoin_part
+POSTHOOK: query: CREATE TABLE srcbucket_mapjoin_part (key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@srcbucket_mapjoin_part
+PREHOOK: query: load data local inpath '../../data/files/srcbucket20.txt' INTO TABLE srcbucket_mapjoin partition(ds='2008-04-08')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@srcbucket_mapjoin
+POSTHOOK: query: load data local inpath '../../data/files/srcbucket20.txt' INTO TABLE srcbucket_mapjoin partition(ds='2008-04-08')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@srcbucket_mapjoin
+POSTHOOK: Output: default@srcbucket_mapjoin@ds=2008-04-08
+PREHOOK: query: load data local inpath '../../data/files/srcbucket22.txt' INTO TABLE srcbucket_mapjoin partition(ds='2008-04-08')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@srcbucket_mapjoin@ds=2008-04-08
+POSTHOOK: query: load data local inpath '../../data/files/srcbucket22.txt' INTO TABLE srcbucket_mapjoin partition(ds='2008-04-08')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@srcbucket_mapjoin@ds=2008-04-08
+PREHOOK: query: load data local inpath '../../data/files/srcbucket20.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@srcbucket_mapjoin_part
+POSTHOOK: query: load data local inpath '../../data/files/srcbucket20.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@srcbucket_mapjoin_part
+POSTHOOK: Output: default@srcbucket_mapjoin_part@ds=2008-04-08
+PREHOOK: query: load data local inpath '../../data/files/srcbucket21.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@srcbucket_mapjoin_part@ds=2008-04-08
+POSTHOOK: query: load data local inpath '../../data/files/srcbucket21.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@srcbucket_mapjoin_part@ds=2008-04-08
+PREHOOK: query: load data local inpath '../../data/files/srcbucket22.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@srcbucket_mapjoin_part@ds=2008-04-08
+POSTHOOK: query: load data local inpath '../../data/files/srcbucket22.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@srcbucket_mapjoin_part@ds=2008-04-08
+PREHOOK: query: load data local inpath '../../data/files/srcbucket23.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@srcbucket_mapjoin_part@ds=2008-04-08
+POSTHOOK: query: load data local inpath '../../data/files/srcbucket23.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@srcbucket_mapjoin_part@ds=2008-04-08
+PREHOOK: query: insert overwrite table tab_part partition (ds='2008-04-08')
+select key,value from srcbucket_mapjoin_part
+PREHOOK: type: QUERY
+PREHOOK: Input: default@srcbucket_mapjoin_part
+PREHOOK: Input: default@srcbucket_mapjoin_part@ds=2008-04-08
+PREHOOK: Output: default@tab_part@ds=2008-04-08
+POSTHOOK: query: insert overwrite table tab_part partition (ds='2008-04-08')
+select key,value from srcbucket_mapjoin_part
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@srcbucket_mapjoin_part
+POSTHOOK: Input: default@srcbucket_mapjoin_part@ds=2008-04-08
+POSTHOOK: Output: default@tab_part@ds=2008-04-08
+POSTHOOK: Lineage: tab_part PARTITION(ds=2008-04-08).key SIMPLE [(srcbucket_mapjoin_part)srcbucket_mapjoin_part.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: tab_part PARTITION(ds=2008-04-08).value SIMPLE [(srcbucket_mapjoin_part)srcbucket_mapjoin_part.FieldSchema(name:value, type:string, comment:null), ]
+PREHOOK: query: describe formatted tab_part partition (ds='2008-04-08')
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@tab_part
+POSTHOOK: query: describe formatted tab_part partition (ds='2008-04-08')
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@tab_part
+# col_name            	data_type           	comment             
+key                 	int                 	                    
+value               	string              	                    
+	 	 
+# Partition Information	 	 
+# col_name            	data_type           	comment             
+ds                  	string              	                    
+	 	 
+# Detailed Partition Information	 	 
+Partition Value:    	[2008-04-08]        	 
+Database:           	default             	 
+Table:              	tab_part            	 
+#### A masked pattern was here ####
+Partition Parameters:	 	 
+	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}}
+	numFiles            	4                   
+	numRows             	500                 
+	rawDataSize         	5312                
+	totalSize           	5812                
+#### A masked pattern was here ####
+	 	 
+# Storage Information	 	 
+SerDe Library:      	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe	 
+InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
+OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
+Compressed:         	No                  	 
+Num Buckets:        	4                   	 
+Bucket Columns:     	[key]               	 
+Sort Columns:       	[Order(col:key, order:1)]	 
+Storage Desc Params:	 	 
+	serialization.format	1                   
+PREHOOK: query: CREATE TABLE tab(key int, value string) PARTITIONED BY(ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@tab
+POSTHOOK: query: CREATE TABLE tab(key int, value string) PARTITIONED BY(ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@tab
+PREHOOK: query: insert overwrite table tab partition (ds='2008-04-08')
+select key,value from srcbucket_mapjoin
+PREHOOK: type: QUERY
+PREHOOK: Input: default@srcbucket_mapjoin
+PREHOOK: Input: default@srcbucket_mapjoin@ds=2008-04-08
+PREHOOK: Output: default@tab@ds=2008-04-08
+POSTHOOK: query: insert overwrite table tab partition (ds='2008-04-08')
+select key,value from srcbucket_mapjoin
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@srcbucket_mapjoin
+POSTHOOK: Input: default@srcbucket_mapjoin@ds=2008-04-08
+POSTHOOK: Output: default@tab@ds=2008-04-08
+POSTHOOK: Lineage: tab PARTITION(ds=2008-04-08).key SIMPLE [(srcbucket_mapjoin)srcbucket_mapjoin.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: tab PARTITION(ds=2008-04-08).value SIMPLE [(srcbucket_mapjoin)srcbucket_mapjoin.FieldSchema(name:value, type:string, comment:null), ]
+PREHOOK: query: describe formatted tab partition (ds='2008-04-08')
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@tab
+POSTHOOK: query: describe formatted tab partition (ds='2008-04-08')
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@tab
+# col_name            	data_type           	comment             
+key                 	int                 	                    
+value               	string              	                    
+	 	 
+# Partition Information	 	 
+# col_name            	data_type           	comment             
+ds                  	string              	                    
+	 	 
+# Detailed Partition Information	 	 
+Partition Value:    	[2008-04-08]        	 
+Database:           	default             	 
+Table:              	tab                 	 
+#### A masked pattern was here ####
+Partition Parameters:	 	 
+	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}}
+	numFiles            	2                   
+	numRows             	242                 
+	rawDataSize         	2566                
+	totalSize           	2808                
+#### A masked pattern was here ####
+	 	 
+# Storage Information	 	 
+SerDe Library:      	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe	 
+InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
+OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
+Compressed:         	No                  	 
+Num Buckets:        	2                   	 
+Bucket Columns:     	[key]               	 
+Sort Columns:       	[Order(col:key, order:1)]	 
+Storage Desc Params:	 	 
+	serialization.format	1                   
+PREHOOK: query: drop table nzhang_part14
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@nzhang_part14
+PREHOOK: Output: default@nzhang_part14
+POSTHOOK: query: drop table nzhang_part14
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@nzhang_part14
+POSTHOOK: Output: default@nzhang_part14
+PREHOOK: query: create table if not exists nzhang_part14 (key string, value string)
+  partitioned by (ds string, hr string)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@nzhang_part14
+POSTHOOK: query: create table if not exists nzhang_part14 (key string, value string)
+  partitioned by (ds string, hr string)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@nzhang_part14
+PREHOOK: query: describe formatted nzhang_part14
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@nzhang_part14
+POSTHOOK: query: describe formatted nzhang_part14
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@nzhang_part14
+# col_name            	data_type           	comment             
+key                 	string              	                    
+value               	string              	                    
+	 	 
+# Partition Information	 	 
+# col_name            	data_type           	comment             
+ds                  	string              	                    
+hr                  	string              	                    
+	 	 
+# Detailed Table Information	 	 
+Database:           	default             	 
+#### A masked pattern was here ####
+Retention:          	0                   	 
+#### A masked pattern was here ####
+Table Type:         	MANAGED_TABLE       	 
+Table Parameters:	 	 
+	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\"}
+	numFiles            	0                   
+	numPartitions       	0                   
+	numRows             	0                   
+	rawDataSize         	0                   
+	totalSize           	0                   
+#### A masked pattern was here ####
+	 	 
+# Storage Information	 	 
+SerDe Library:      	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe	 
+InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
+OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
+Compressed:         	No                  	 
+Num Buckets:        	-1                  	 
+Bucket Columns:     	[]                  	 
+Sort Columns:       	[]                  	 
+Storage Desc Params:	 	 
+	serialization.format	1                   
+PREHOOK: query: insert overwrite table nzhang_part14 partition(ds, hr) 
+select key, value, ds, hr from (
+  select * from (select 'k1' as key, cast(null as string) as value, '1' as ds, '2' as hr from src limit 2)a 
+  union all
+  select * from (select 'k2' as key, '' as value, '1' as ds, '3' as hr from src limit 2)b
+  union all 
+  select * from (select 'k3' as key, ' ' as value, '2' as ds, '1' as hr from src limit 2)c
+) T
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@nzhang_part14
+POSTHOOK: query: insert overwrite table nzhang_part14 partition(ds, hr) 
+select key, value, ds, hr from (
+  select * from (select 'k1' as key, cast(null as string) as value, '1' as ds, '2' as hr from src limit 2)a 
+  union all
+  select * from (select 'k2' as key, '' as value, '1' as ds, '3' as hr from src limit 2)b
+  union all 
+  select * from (select 'k3' as key, ' ' as value, '2' as ds, '1' as hr from src limit 2)c
+) T
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@nzhang_part14@ds=1/hr=2
+POSTHOOK: Output: default@nzhang_part14@ds=1/hr=3
+POSTHOOK: Output: default@nzhang_part14@ds=2/hr=1
+POSTHOOK: Lineage: nzhang_part14 PARTITION(ds=1,hr=2).key EXPRESSION []
+POSTHOOK: Lineage: nzhang_part14 PARTITION(ds=1,hr=2).value EXPRESSION []
+POSTHOOK: Lineage: nzhang_part14 PARTITION(ds=1,hr=3).key EXPRESSION []
+POSTHOOK: Lineage: nzhang_part14 PARTITION(ds=1,hr=3).value EXPRESSION []
+POSTHOOK: Lineage: nzhang_part14 PARTITION(ds=2,hr=1).key EXPRESSION []
+POSTHOOK: Lineage: nzhang_part14 PARTITION(ds=2,hr=1).value EXPRESSION []
+PREHOOK: query: desc formatted nzhang_part14 partition(ds='1', hr='3')
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@nzhang_part14
+POSTHOOK: query: desc formatted nzhang_part14 partition(ds='1', hr='3')
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@nzhang_part14
+# col_name            	data_type           	comment             
+key                 	string              	                    
+value               	string              	                    
+	 	 
+# Partition Information	 	 
+# col_name            	data_type           	comment             
+ds                  	string              	                    
+hr                  	string              	                    
+	 	 
+# Detailed Partition Information	 	 
+Partition Value:    	[1, 3]              	 
+Database:           	default             	 
+Table:              	nzhang_part14       	 
+#### A masked pattern was here ####
+Partition Parameters:	 	 
+	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}}
+	numFiles            	1                   
+	numRows             	2                   
+	rawDataSize         	6                   
+	totalSize           	8                   
+#### A masked pattern was here ####
+	 	 
+# Storage Information	 	 
+SerDe Library:      	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe	 
+InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
+OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
+Compressed:         	No                  	 
+Num Buckets:        	-1                  	 
+Bucket Columns:     	[]                  	 
+Sort Columns:       	[]                  	 
+Storage Desc Params:	 	 
+	serialization.format	1                   
+PREHOOK: query: INSERT OVERWRITE TABLE nzhang_part14 PARTITION (ds='2010-03-03', hr)
+SELECT key, value, hr FROM srcpart WHERE ds is not null and hr>10
+PREHOOK: type: QUERY
+PREHOOK: Input: default@srcpart
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
+PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
+PREHOOK: Output: default@nzhang_part14@ds=2010-03-03
+POSTHOOK: query: INSERT OVERWRITE TABLE nzhang_part14 PARTITION (ds='2010-03-03', hr)
+SELECT key, value, hr FROM srcpart WHERE ds is not null and hr>10
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@srcpart
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
+POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
+POSTHOOK: Output: default@nzhang_part14@ds=2010-03-03/hr=11
+POSTHOOK: Output: default@nzhang_part14@ds=2010-03-03/hr=12
+POSTHOOK: Lineage: nzhang_part14 PARTITION(ds=2010-03-03,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: nzhang_part14 PARTITION(ds=2010-03-03,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: nzhang_part14 PARTITION(ds=2010-03-03,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: nzhang_part14 PARTITION(ds=2010-03-03,hr=12).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: desc formatted nzhang_part14 PARTITION(ds='2010-03-03', hr='12')
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@nzhang_part14
+POSTHOOK: query: desc formatted nzhang_part14 PARTITION(ds='2010-03-03', hr='12')
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@nzhang_part14
+# col_name            	data_type           	comment             
+key                 	string              	                    
+value               	string              	                    
+	 	 
+# Partition Information	 	 
+# col_name            	data_type           	comment             
+ds                  	string              	                    
+hr                  	string              	                    
+	 	 
+# Detailed Partition Information	 	 
+Partition Value:    	[2010-03-03, 12]    	 
+Database:           	default             	 
+Table:              	nzhang_part14       	 
+#### A masked pattern was here ####
+Partition Parameters:	 	 
+	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}}
+	numFiles            	1                   
+	numRows             	1000                
+	rawDataSize         	10624               
+	totalSize           	11624               
+#### A masked pattern was here ####
+	 	 
+# Storage Information	 	 
+SerDe Library:      	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe	 
+InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
+OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
+Compressed:         	No                  	 
+Num Buckets:        	-1                  	 
+Bucket Columns:     	[]                  	 
+Sort Columns:       	[]                  	 
+Storage Desc Params:	 	 
+	serialization.format	1                   
+PREHOOK: query: drop table nzhang_part14
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@nzhang_part14
+PREHOOK: Output: default@nzhang_part14
+POSTHOOK: query: drop table nzhang_part14
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@nzhang_part14
+POSTHOOK: Output: default@nzhang_part14
+PREHOOK: query: create table if not exists nzhang_part14 (key string, value string)
+partitioned by (ds string, hr string)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@nzhang_part14
+POSTHOOK: query: create table if not exists nzhang_part14 (key string, value string)
+partitioned by (ds string, hr string)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@nzhang_part14
+PREHOOK: query: INSERT OVERWRITE TABLE nzhang_part14 PARTITION (ds='2010-03-03', hr)
+SELECT key, value, hr FROM srcpart WHERE ds is not null and hr>10
+PREHOOK: type: QUERY
+PREHOOK: Input: default@srcpart
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
+PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
+PREHOOK: Output: default@nzhang_part14@ds=2010-03-03
+POSTHOOK: query: INSERT OVERWRITE TABLE nzhang_part14 PARTITION (ds='2010-03-03', hr)
+SELECT key, value, hr FROM srcpart WHERE ds is not null and hr>10
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@srcpart
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
+POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
+POSTHOOK: Output: default@nzhang_part14@ds=2010-03-03/hr=11
+POSTHOOK: Output: default@nzhang_part14@ds=2010-03-03/hr=12
+POSTHOOK: Lineage: nzhang_part14 PARTITION(ds=2010-03-03,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: nzhang_part14 PARTITION(ds=2010-03-03,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: nzhang_part14 PARTITION(ds=2010-03-03,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: nzhang_part14 PARTITION(ds=2010-03-03,hr=12).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: desc formatted nzhang_part14 PARTITION(ds='2010-03-03', hr='12')
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@nzhang_part14
+POSTHOOK: query: desc formatted nzhang_part14 PARTITION(ds='2010-03-03', hr='12')
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@nzhang_part14
+# col_name            	data_type           	comment             
+key                 	string              	                    
+value               	string              	                    
+	 	 
+# Partition Information	 	 
+# col_name            	data_type           	comment             
+ds                  	string              	                    
+hr                  	string              	                    
+	 	 
+# Detailed Partition Information	 	 
+Partition Value:    	[2010-03-03, 12]    	 
+Database:           	default             	 
+Table:              	nzhang_part14       	 
+#### A masked pattern was here ####
+Partition Parameters:	 	 
+	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}}
+	numFiles            	1                   
+	numRows             	1000                
+	rawDataSize         	10624               
+	totalSize           	11624               
+#### A masked pattern was here ####
+	 	 
+# Storage Information	 	 
+SerDe Library:      	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe	 
+InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
+OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
+Compressed:         	No                  	 
+Num Buckets:        	-1                  	 
+Bucket Columns:     	[]                  	 
+Sort Columns:       	[]                  	 
+Storage Desc Params:	 	 
+	serialization.format	1                   
+PREHOOK: query: drop table a
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@a
+PREHOOK: Output: default@a
+POSTHOOK: query: drop table a
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@a
+POSTHOOK: Output: default@a
+PREHOOK: query: create table a (key string, value string)
+partitioned by (ds string, hr string)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@a
+POSTHOOK: query: create table a (key string, value string)
+partitioned by (ds string, hr string)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@a
+PREHOOK: query: drop table b
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@b
+PREHOOK: Output: default@b
+POSTHOOK: query: drop table b
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@b
+POSTHOOK: Output: default@b
+PREHOOK: query: create table b (key string, value string)
+partitioned by (ds string, hr string)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@b
+POSTHOOK: query: create table b (key string, value string)
+partitioned by (ds string, hr string)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@b
+PREHOOK: query: drop table c
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: drop table c
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: create table c (key string, value string)
+partitioned by (ds string, hr string)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@c
+POSTHOOK: query: create table c (key string, value string)
+partitioned by (ds string, hr string)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@c
+PREHOOK: query: FROM srcpart 
+INSERT OVERWRITE TABLE a PARTITION (ds='2010-03-11', hr) SELECT key, value, hr WHERE ds is not null and hr>10
+INSERT OVERWRITE TABLE b PARTITION (ds='2010-04-11', hr) SELECT key, value, hr WHERE ds is not null and hr>11
+INSERT OVERWRITE TABLE c PARTITION (ds='2010-05-11', hr) SELECT key, value, hr WHERE hr>0
+PREHOOK: type: QUERY
+PREHOOK: Input: default@srcpart
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
+PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
+PREHOOK: Output: default@a@ds=2010-03-11
+PREHOOK: Output: default@b@ds=2010-04-11
+PREHOOK: Output: default@c@ds=2010-05-11
+POSTHOOK: query: FROM srcpart 
+INSERT OVERWRITE TABLE a PARTITION (ds='2010-03-11', hr) SELECT key, value, hr WHERE ds is not null and hr>10
+INSERT OVERWRITE TABLE b PARTITION (ds='2010-04-11', hr) SELECT key, value, hr WHERE ds is not null and hr>11
+INSERT OVERWRITE TABLE c PARTITION (ds='2010-05-11', hr) SELECT key, value, hr WHERE hr>0
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@srcpart
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
+POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
+POSTHOOK: Output: default@a@ds=2010-03-11/hr=11
+POSTHOOK: Output: default@a@ds=2010-03-11/hr=12
+POSTHOOK: Output: default@b@ds=2010-04-11/hr=12
+POSTHOOK: Output: default@c@ds=2010-05-11/hr=11
+POSTHOOK: Output: default@c@ds=2010-05-11/hr=12
+POSTHOOK: Lineage: a PARTITION(ds=2010-03-11,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: a PARTITION(ds=2010-03-11,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: a PARTITION(ds=2010-03-11,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: a PARTITION(ds=2010-03-11,hr=12).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: b PARTITION(ds=2010-04-11,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: b PARTITION(ds=2010-04-11,hr=12).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: c PARTITION(ds=2010-05-11,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: c PARTITION(ds=2010-05-11,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: c PARTITION(ds=2010-05-11,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: c PARTITION(ds=2010-05-11,hr=12).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: explain select key from a
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select key from a
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        TableScan
+          alias: a
+          Statistics: Num rows: 2000 Data size: 174000 Basic stats: COMPLETE Column stats: COMPLETE
+          Select Operator
+            expressions: key (type: string)
+            outputColumnNames: _col0
+            Statistics: Num rows: 2000 Data size: 174000 Basic stats: COMPLETE Column stats: COMPLETE
+            ListSink
+
+PREHOOK: query: explain select value from b
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select value from b
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        TableScan
+          alias: b
+          Statistics: Num rows: 1000 Data size: 91000 Basic stats: COMPLETE Column stats: COMPLETE
+          Select Operator
+            expressions: value (type: string)
+            outputColumnNames: _col0
+            Statistics: Num rows: 1000 Data size: 91000 Basic stats: COMPLETE Column stats: COMPLETE
+            ListSink
+
+PREHOOK: query: explain select key from b
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select key from b
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        TableScan
+          alias: b
+          Statistics: Num rows: 1000 Data size: 87000 Basic stats: COMPLETE Column stats: COMPLETE
+          Select Operator
+            expressions: key (type: string)
+            outputColumnNames: _col0
+            Statistics: Num rows: 1000 Data size: 87000 Basic stats: COMPLETE Column stats: COMPLETE
+            ListSink
+
+PREHOOK: query: explain select value from c
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select value from c
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        TableScan
+          alias: c
+          Statistics: Num rows: 2000 Data size: 182000 Basic stats: COMPLETE Column stats: COMPLETE
+          Select Operator
+            expressions: value (type: string)
+            outputColumnNames: _col0
+            Statistics: Num rows: 2000 Data size: 182000 Basic stats: COMPLETE Column stats: COMPLETE
+            ListSink
+
+PREHOOK: query: explain select key from c
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select key from c
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        TableScan
+          alias: c
+          Statistics: Num rows: 2000 Data size: 174000 Basic stats: COMPLETE Column stats: COMPLETE
+          Select Operator
+            expressions: key (type: string)
+            outputColumnNames: _col0
+            Statistics: Num rows: 2000 Data size: 174000 Basic stats: COMPLETE Column stats: COMPLETE
+            ListSink
+


[06/22] hive git commit: HIVE-16827 : Merge stats task and column stats task into a single task (Zoltan Haindrich via Ashutosh Chauhan)

Posted by ha...@apache.org.
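Context for the golden-file updates below: after this change, ANALYZE TABLE ... COMPUTE STATISTICS FOR COLUMNS records the analyzed table (and its partitions) as write entities, so the hooks now print matching PREHOOK/POSTHOOK Output lines, and the stage formerly printed as "Stats-Aggr Operator" is rendered by the merged task as "Stats Work" with a "Basic Stats Work:" sub-item. A minimal HiveQL sketch that would exercise both changed outputs (the table name stats_demo is hypothetical and used only for illustration; src is the standard test table referenced throughout these q-files):

  -- stats_demo is a hypothetical table, not part of this patch
  CREATE TABLE stats_demo (key STRING, value STRING) PARTITIONED BY (ds STRING);

  -- basic and column statistics are now gathered by a single stats task;
  -- the analyzed table appears under both Input and Output in the hooks
  ANALYZE TABLE stats_demo PARTITION (ds='2010-03-03') COMPUTE STATISTICS FOR COLUMNS;

  -- EXPLAIN of an INSERT now prints the renamed stage, e.g.:
  --   Stage: Stage-2
  --     Stats Work
  --       Basic Stats Work:
  EXPLAIN INSERT OVERWRITE TABLE stats_demo PARTITION (ds='2010-03-03')
    SELECT key, value FROM src;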
http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/annotate_stats_join.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/annotate_stats_join.q.out b/ql/src/test/results/clientpositive/spark/annotate_stats_join.q.out
index 02d1605..118a48e 100644
--- a/ql/src/test/results/clientpositive/spark/annotate_stats_join.q.out
+++ b/ql/src/test/results/clientpositive/spark/annotate_stats_join.q.out
@@ -97,26 +97,32 @@ POSTHOOK: Output: default@loc
 PREHOOK: query: analyze table emp compute statistics for columns lastname,deptid,locid
 PREHOOK: type: QUERY
 PREHOOK: Input: default@emp
+PREHOOK: Output: default@emp
 #### A masked pattern was here ####
 POSTHOOK: query: analyze table emp compute statistics for columns lastname,deptid,locid
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@emp
+POSTHOOK: Output: default@emp
 #### A masked pattern was here ####
 PREHOOK: query: analyze table dept compute statistics for columns deptname,deptid
 PREHOOK: type: QUERY
 PREHOOK: Input: default@dept
+PREHOOK: Output: default@dept
 #### A masked pattern was here ####
 POSTHOOK: query: analyze table dept compute statistics for columns deptname,deptid
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@dept
+POSTHOOK: Output: default@dept
 #### A masked pattern was here ####
 PREHOOK: query: analyze table loc compute statistics for columns state,locid,zip,year
 PREHOOK: type: QUERY
 PREHOOK: Input: default@loc
+PREHOOK: Output: default@loc
 #### A masked pattern was here ####
 POSTHOOK: query: analyze table loc compute statistics for columns state,locid,zip,year
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@loc
+POSTHOOK: Output: default@loc
 #### A masked pattern was here ####
 PREHOOK: query: explain select * from emp e join dept d on (e.deptid = d.deptid)
 PREHOOK: type: QUERY

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/auto_join1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/auto_join1.q.out b/ql/src/test/results/clientpositive/spark/auto_join1.q.out
index d9cd770..49795dc 100644
--- a/ql/src/test/results/clientpositive/spark/auto_join1.q.out
+++ b/ql/src/test/results/clientpositive/spark/auto_join1.q.out
@@ -96,7 +96,8 @@ STAGE PLANS:
               name: default.dest_j1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM src src1 JOIN src src2 ON (src1.key = src2.key)
 INSERT OVERWRITE TABLE dest_j1 SELECT src1.key, src2.value

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/auto_join14.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/auto_join14.q.out b/ql/src/test/results/clientpositive/spark/auto_join14.q.out
index 82deefe..356cc85 100644
--- a/ql/src/test/results/clientpositive/spark/auto_join14.q.out
+++ b/ql/src/test/results/clientpositive/spark/auto_join14.q.out
@@ -96,7 +96,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM src JOIN srcpart ON src.key = srcpart.key AND srcpart.ds = '2008-04-08' and src.key > 100
 INSERT OVERWRITE TABLE dest1 SELECT src.key, srcpart.value

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/auto_join17.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/auto_join17.q.out b/ql/src/test/results/clientpositive/spark/auto_join17.q.out
index 6d63fa6..b3e01f2 100644
--- a/ql/src/test/results/clientpositive/spark/auto_join17.q.out
+++ b/ql/src/test/results/clientpositive/spark/auto_join17.q.out
@@ -96,7 +96,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM src src1 JOIN src src2 ON (src1.key = src2.key)
 INSERT OVERWRITE TABLE dest1 SELECT src1.*, src2.*

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/auto_join19.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/auto_join19.q.out b/ql/src/test/results/clientpositive/spark/auto_join19.q.out
index 88ef3f1..75426c1 100644
--- a/ql/src/test/results/clientpositive/spark/auto_join19.q.out
+++ b/ql/src/test/results/clientpositive/spark/auto_join19.q.out
@@ -98,7 +98,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM srcpart src1 JOIN src src2 ON (src1.key = src2.key)
 INSERT OVERWRITE TABLE dest1 SELECT src1.key, src2.value 

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/auto_join2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/auto_join2.q.out b/ql/src/test/results/clientpositive/spark/auto_join2.q.out
index e32abba..126acad 100644
--- a/ql/src/test/results/clientpositive/spark/auto_join2.q.out
+++ b/ql/src/test/results/clientpositive/spark/auto_join2.q.out
@@ -124,7 +124,8 @@ STAGE PLANS:
               name: default.dest_j2
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM src src1 JOIN src src2 ON (src1.key = src2.key) JOIN src src3 ON (src1.key + src2.key = src3.key)
 INSERT OVERWRITE TABLE dest_j2 SELECT src1.key, src3.value

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/auto_join26.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/auto_join26.q.out b/ql/src/test/results/clientpositive/spark/auto_join26.q.out
index bfb3564..33e1609 100644
--- a/ql/src/test/results/clientpositive/spark/auto_join26.q.out
+++ b/ql/src/test/results/clientpositive/spark/auto_join26.q.out
@@ -118,7 +118,8 @@ STAGE PLANS:
               name: default.dest_j1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: INSERT OVERWRITE TABLE dest_j1 
 SELECT  x.key, count(1) FROM src1 x JOIN src y ON (x.key = y.key) group by x.key

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/auto_join3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/auto_join3.q.out b/ql/src/test/results/clientpositive/spark/auto_join3.q.out
index a17cc1a..11f909d 100644
--- a/ql/src/test/results/clientpositive/spark/auto_join3.q.out
+++ b/ql/src/test/results/clientpositive/spark/auto_join3.q.out
@@ -119,7 +119,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM src src1 JOIN src src2 ON (src1.key = src2.key) JOIN src src3 ON (src1.key = src3.key)
 INSERT OVERWRITE TABLE dest1 SELECT src1.key, src3.value

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/auto_join4.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/auto_join4.q.out b/ql/src/test/results/clientpositive/spark/auto_join4.q.out
index a2c9a74..0502e62 100644
--- a/ql/src/test/results/clientpositive/spark/auto_join4.q.out
+++ b/ql/src/test/results/clientpositive/spark/auto_join4.q.out
@@ -118,7 +118,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM (
  FROM 

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/auto_join5.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/auto_join5.q.out b/ql/src/test/results/clientpositive/spark/auto_join5.q.out
index 8adbf1c..ae83ebc 100644
--- a/ql/src/test/results/clientpositive/spark/auto_join5.q.out
+++ b/ql/src/test/results/clientpositive/spark/auto_join5.q.out
@@ -118,7 +118,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM (
  FROM 

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/auto_join6.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/auto_join6.q.out b/ql/src/test/results/clientpositive/spark/auto_join6.q.out
index 69df6cf..b1ebb16 100644
--- a/ql/src/test/results/clientpositive/spark/auto_join6.q.out
+++ b/ql/src/test/results/clientpositive/spark/auto_join6.q.out
@@ -118,7 +118,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM (
  FROM 

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/auto_join7.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/auto_join7.q.out b/ql/src/test/results/clientpositive/spark/auto_join7.q.out
index e02ae6f..b425750 100644
--- a/ql/src/test/results/clientpositive/spark/auto_join7.q.out
+++ b/ql/src/test/results/clientpositive/spark/auto_join7.q.out
@@ -148,7 +148,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM (
  FROM 

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/auto_join8.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/auto_join8.q.out b/ql/src/test/results/clientpositive/spark/auto_join8.q.out
index 179e1d5..a789e8c 100644
--- a/ql/src/test/results/clientpositive/spark/auto_join8.q.out
+++ b/ql/src/test/results/clientpositive/spark/auto_join8.q.out
@@ -121,7 +121,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM (
  FROM 

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/auto_join9.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/auto_join9.q.out b/ql/src/test/results/clientpositive/spark/auto_join9.q.out
index b2ed51c..671eafe 100644
--- a/ql/src/test/results/clientpositive/spark/auto_join9.q.out
+++ b/ql/src/test/results/clientpositive/spark/auto_join9.q.out
@@ -96,7 +96,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM srcpart src1 JOIN src src2 ON (src1.key = src2.key)
 INSERT OVERWRITE TABLE dest1 SELECT src1.key, src2.value where src1.ds = '2008-04-08' and src1.hr = '12'

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/auto_smb_mapjoin_14.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/auto_smb_mapjoin_14.q.out b/ql/src/test/results/clientpositive/spark/auto_smb_mapjoin_14.q.out
index c26949f..9308d47 100644
--- a/ql/src/test/results/clientpositive/spark/auto_smb_mapjoin_14.q.out
+++ b/ql/src/test/results/clientpositive/spark/auto_smb_mapjoin_14.q.out
@@ -1391,7 +1391,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-1
     Move Operator
@@ -1404,7 +1405,8 @@ STAGE PLANS:
               name: default.dest2
 
   Stage: Stage-4
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: from (
   select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key
@@ -1611,7 +1613,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-1
     Move Operator
@@ -1624,7 +1627,8 @@ STAGE PLANS:
               name: default.dest2
 
   Stage: Stage-4
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: from (
   select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/auto_sortmerge_join_13.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/auto_sortmerge_join_13.q.out b/ql/src/test/results/clientpositive/spark/auto_sortmerge_join_13.q.out
index cd4b83a..ded68d5 100644
--- a/ql/src/test/results/clientpositive/spark/auto_sortmerge_join_13.q.out
+++ b/ql/src/test/results/clientpositive/spark/auto_sortmerge_join_13.q.out
@@ -134,7 +134,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-1
     Move Operator
@@ -147,7 +148,8 @@ STAGE PLANS:
               name: default.dest2
 
   Stage: Stage-4
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: from (
   SELECT a.key key1, a.value value1, b.key key2, b.value value2 
@@ -319,7 +321,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-1
     Move Operator
@@ -332,7 +335,8 @@ STAGE PLANS:
               name: default.dest2
 
   Stage: Stage-4
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: from (
   SELECT a.key key1, a.value value1, b.key key2, b.value value2 
@@ -532,7 +536,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-1
     Move Operator
@@ -545,7 +550,8 @@ STAGE PLANS:
               name: default.dest2
 
   Stage: Stage-4
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: from (
   SELECT a.key key1, a.value value1, b.key key2, b.value value2 

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/avro_decimal_native.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/avro_decimal_native.q.out b/ql/src/test/results/clientpositive/spark/avro_decimal_native.q.out
index 2a00cb3..16666c9 100644
--- a/ql/src/test/results/clientpositive/spark/avro_decimal_native.q.out
+++ b/ql/src/test/results/clientpositive/spark/avro_decimal_native.q.out
@@ -25,10 +25,12 @@ POSTHOOK: Output: default@dec
 PREHOOK: query: ANALYZE TABLE `dec` COMPUTE STATISTICS FOR COLUMNS value
 PREHOOK: type: QUERY
 PREHOOK: Input: default@dec
+PREHOOK: Output: default@dec
 #### A masked pattern was here ####
 POSTHOOK: query: ANALYZE TABLE `dec` COMPUTE STATISTICS FOR COLUMNS value
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@dec
+POSTHOOK: Output: default@dec
 #### A masked pattern was here ####
 PREHOOK: query: DESC FORMATTED `dec` value
 PREHOOK: type: DESCTABLE
@@ -48,7 +50,7 @@ num_trues
 num_falses          	                    	 	 	 	 	 	 	 	 	 	 
 bitVector           	                    	 	 	 	 	 	 	 	 	 	 
 comment             	from deserializer   	 	 	 	 	 	 	 	 	 	 
-COLUMN_STATS_ACCURATE	{\"COLUMN_STATS\":{\"value\":\"true\"}}	 	 	 	 	 	 	 	 	 	 
+COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"value\":\"true\"}}	 	 	 	 	 	 	 	 	 	 
 PREHOOK: query: DROP TABLE IF EXISTS avro_dec
 PREHOOK: type: DROPTABLE
 POSTHOOK: query: DROP TABLE IF EXISTS avro_dec

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/bucket2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/bucket2.q.out b/ql/src/test/results/clientpositive/spark/bucket2.q.out
index 90c9e54..32f1258 100644
--- a/ql/src/test/results/clientpositive/spark/bucket2.q.out
+++ b/ql/src/test/results/clientpositive/spark/bucket2.q.out
@@ -166,7 +166,8 @@ STAGE PLANS:
               name: default.bucket2_1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
 PREHOOK: query: insert overwrite table bucket2_1

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/bucket3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/bucket3.q.out b/ql/src/test/results/clientpositive/spark/bucket3.q.out
index 078460f..b88e73f 100644
--- a/ql/src/test/results/clientpositive/spark/bucket3.q.out
+++ b/ql/src/test/results/clientpositive/spark/bucket3.q.out
@@ -163,7 +163,8 @@ STAGE PLANS:
               name: default.bucket3_1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
 PREHOOK: query: insert overwrite table bucket3_1 partition (ds='1')

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/bucket4.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/bucket4.q.out b/ql/src/test/results/clientpositive/spark/bucket4.q.out
index 13e21b6..63683fc 100644
--- a/ql/src/test/results/clientpositive/spark/bucket4.q.out
+++ b/ql/src/test/results/clientpositive/spark/bucket4.q.out
@@ -169,7 +169,8 @@ STAGE PLANS:
               name: default.bucket4_1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
 PREHOOK: query: insert overwrite table bucket4_1

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/bucket5.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/bucket5.q.out b/ql/src/test/results/clientpositive/spark/bucket5.q.out
index 8aa4e5f..e3e18a5 100644
--- a/ql/src/test/results/clientpositive/spark/bucket5.q.out
+++ b/ql/src/test/results/clientpositive/spark/bucket5.q.out
@@ -291,7 +291,8 @@ STAGE PLANS:
               name: default.bucketed_table
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
   Stage: Stage-1
@@ -323,7 +324,8 @@ STAGE PLANS:
               name: default.unbucketed_table
 
   Stage: Stage-4
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
 PREHOOK: query: FROM src

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/bucket6.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/bucket6.q.out b/ql/src/test/results/clientpositive/spark/bucket6.q.out
index d5d53d3..b5d3347 100644
--- a/ql/src/test/results/clientpositive/spark/bucket6.q.out
+++ b/ql/src/test/results/clientpositive/spark/bucket6.q.out
@@ -65,7 +65,8 @@ STAGE PLANS:
               name: default.src_bucket
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: insert into table src_bucket select key,value from srcpart
 PREHOOK: type: QUERY

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/bucket_map_join_spark1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/bucket_map_join_spark1.q.out b/ql/src/test/results/clientpositive/spark/bucket_map_join_spark1.q.out
index 194f16e..71063f9 100644
--- a/ql/src/test/results/clientpositive/spark/bucket_map_join_spark1.q.out
+++ b/ql/src/test/results/clientpositive/spark/bucket_map_join_spark1.q.out
@@ -363,7 +363,8 @@ STAGE PLANS:
               name: default.bucketmapjoin_tmp_result
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
 PREHOOK: query: insert overwrite table bucketmapjoin_tmp_result
@@ -659,7 +660,8 @@ STAGE PLANS:
               name: default.bucketmapjoin_tmp_result
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
 PREHOOK: query: insert overwrite table bucketmapjoin_tmp_result

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/bucket_map_join_spark2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/bucket_map_join_spark2.q.out b/ql/src/test/results/clientpositive/spark/bucket_map_join_spark2.q.out
index bb66d1e..04a3a49 100644
--- a/ql/src/test/results/clientpositive/spark/bucket_map_join_spark2.q.out
+++ b/ql/src/test/results/clientpositive/spark/bucket_map_join_spark2.q.out
@@ -347,7 +347,8 @@ STAGE PLANS:
               name: default.bucketmapjoin_tmp_result
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
 PREHOOK: query: insert overwrite table bucketmapjoin_tmp_result
@@ -654,7 +655,8 @@ STAGE PLANS:
               name: default.bucketmapjoin_tmp_result
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
 PREHOOK: query: insert overwrite table bucketmapjoin_tmp_result

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/bucket_map_join_spark3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/bucket_map_join_spark3.q.out b/ql/src/test/results/clientpositive/spark/bucket_map_join_spark3.q.out
index 10678a4..6ee414c 100644
--- a/ql/src/test/results/clientpositive/spark/bucket_map_join_spark3.q.out
+++ b/ql/src/test/results/clientpositive/spark/bucket_map_join_spark3.q.out
@@ -347,7 +347,8 @@ STAGE PLANS:
               name: default.bucketmapjoin_tmp_result
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
 PREHOOK: query: insert overwrite table bucketmapjoin_tmp_result
@@ -643,7 +644,8 @@ STAGE PLANS:
               name: default.bucketmapjoin_tmp_result
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
 PREHOOK: query: insert overwrite table bucketmapjoin_tmp_result

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/bucket_map_join_tez1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/bucket_map_join_tez1.q.out b/ql/src/test/results/clientpositive/spark/bucket_map_join_tez1.q.out
index c0197d3..461c7da 100644
--- a/ql/src/test/results/clientpositive/spark/bucket_map_join_tez1.q.out
+++ b/ql/src/test/results/clientpositive/spark/bucket_map_join_tez1.q.out
@@ -112,41 +112,57 @@ PREHOOK: query: analyze table srcbucket_mapjoin compute statistics for columns
 PREHOOK: type: QUERY
 PREHOOK: Input: default@srcbucket_mapjoin
 PREHOOK: Input: default@srcbucket_mapjoin@ds=2008-04-08
+PREHOOK: Output: default@srcbucket_mapjoin
+PREHOOK: Output: default@srcbucket_mapjoin@ds=2008-04-08
 #### A masked pattern was here ####
 POSTHOOK: query: analyze table srcbucket_mapjoin compute statistics for columns
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@srcbucket_mapjoin
 POSTHOOK: Input: default@srcbucket_mapjoin@ds=2008-04-08
+POSTHOOK: Output: default@srcbucket_mapjoin
+POSTHOOK: Output: default@srcbucket_mapjoin@ds=2008-04-08
 #### A masked pattern was here ####
 PREHOOK: query: analyze table srcbucket_mapjoin_part compute statistics for columns
 PREHOOK: type: QUERY
 PREHOOK: Input: default@srcbucket_mapjoin_part
 PREHOOK: Input: default@srcbucket_mapjoin_part@ds=2008-04-08
+PREHOOK: Output: default@srcbucket_mapjoin_part
+PREHOOK: Output: default@srcbucket_mapjoin_part@ds=2008-04-08
 #### A masked pattern was here ####
 POSTHOOK: query: analyze table srcbucket_mapjoin_part compute statistics for columns
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@srcbucket_mapjoin_part
 POSTHOOK: Input: default@srcbucket_mapjoin_part@ds=2008-04-08
+POSTHOOK: Output: default@srcbucket_mapjoin_part
+POSTHOOK: Output: default@srcbucket_mapjoin_part@ds=2008-04-08
 #### A masked pattern was here ####
 PREHOOK: query: analyze table tab compute statistics for columns
 PREHOOK: type: QUERY
 PREHOOK: Input: default@tab
 PREHOOK: Input: default@tab@ds=2008-04-08
+PREHOOK: Output: default@tab
+PREHOOK: Output: default@tab@ds=2008-04-08
 #### A masked pattern was here ####
 POSTHOOK: query: analyze table tab compute statistics for columns
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@tab
 POSTHOOK: Input: default@tab@ds=2008-04-08
+POSTHOOK: Output: default@tab
+POSTHOOK: Output: default@tab@ds=2008-04-08
 #### A masked pattern was here ####
 PREHOOK: query: analyze table tab_part compute statistics for columns
 PREHOOK: type: QUERY
 PREHOOK: Input: default@tab_part
 PREHOOK: Input: default@tab_part@ds=2008-04-08
+PREHOOK: Output: default@tab_part
+PREHOOK: Output: default@tab_part@ds=2008-04-08
 #### A masked pattern was here ####
 POSTHOOK: query: analyze table tab_part compute statistics for columns
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@tab_part
 POSTHOOK: Input: default@tab_part@ds=2008-04-08
+POSTHOOK: Output: default@tab_part
+POSTHOOK: Output: default@tab_part@ds=2008-04-08
 #### A masked pattern was here ####
 PREHOOK: query: explain
 select a.key, a.value, b.value
@@ -2248,25 +2264,25 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: srcbucket_mapjoin
-                  Statistics: Num rows: 27 Data size: 2808 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 242 Data size: 2566 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
                     predicate: key is not null (type: boolean)
-                    Statistics: Num rows: 27 Data size: 2808 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 242 Data size: 2566 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: int), substr(value, 5) (type: string)
                       outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 27 Data size: 2808 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 242 Data size: 2566 Basic stats: COMPLETE Column stats: NONE
                       Group By Operator
                         aggregations: sum(_col1)
                         keys: _col0 (type: int)
                         mode: hash
                         outputColumnNames: _col0, _col1
-                        Statistics: Num rows: 27 Data size: 2808 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 242 Data size: 2566 Basic stats: COMPLETE Column stats: NONE
                         Reduce Output Operator
                           key expressions: _col0 (type: int)
                           sort order: +
                           Map-reduce partition columns: _col0 (type: int)
-                          Statistics: Num rows: 27 Data size: 2808 Basic stats: COMPLETE Column stats: NONE
+                          Statistics: Num rows: 242 Data size: 2566 Basic stats: COMPLETE Column stats: NONE
                           value expressions: _col1 (type: double)
         Map 4 
             Map Operator Tree:
@@ -2293,16 +2309,16 @@ STAGE PLANS:
                 keys: KEY._col0 (type: int)
                 mode: mergepartial
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 13 Data size: 1352 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 121 Data size: 1283 Basic stats: COMPLETE Column stats: NONE
                 Select Operator
                   expressions: _col1 (type: double), _col0 (type: int)
                   outputColumnNames: _col0, _col1
-                  Statistics: Num rows: 13 Data size: 1352 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 121 Data size: 1283 Basic stats: COMPLETE Column stats: NONE
                   Reduce Output Operator
                     key expressions: _col1 (type: int)
                     sort order: +
                     Map-reduce partition columns: _col1 (type: int)
-                    Statistics: Num rows: 13 Data size: 1352 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 121 Data size: 1283 Basic stats: COMPLETE Column stats: NONE
                     value expressions: _col0 (type: double)
         Reducer 3 
             Reduce Operator Tree:
@@ -2358,25 +2374,25 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: srcbucket_mapjoin
-                  Statistics: Num rows: 27 Data size: 2808 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 242 Data size: 2566 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
                     predicate: key is not null (type: boolean)
-                    Statistics: Num rows: 27 Data size: 2808 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 242 Data size: 2566 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: int), substr(value, 5) (type: string)
                       outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 27 Data size: 2808 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 242 Data size: 2566 Basic stats: COMPLETE Column stats: NONE
                       Group By Operator
                         aggregations: sum(_col1)
                         keys: _col0 (type: int)
                         mode: hash
                         outputColumnNames: _col0, _col1
-                        Statistics: Num rows: 27 Data size: 2808 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 242 Data size: 2566 Basic stats: COMPLETE Column stats: NONE
                         Reduce Output Operator
                           key expressions: _col0 (type: int)
                           sort order: +
                           Map-reduce partition columns: _col0 (type: int)
-                          Statistics: Num rows: 27 Data size: 2808 Basic stats: COMPLETE Column stats: NONE
+                          Statistics: Num rows: 242 Data size: 2566 Basic stats: COMPLETE Column stats: NONE
                           value expressions: _col1 (type: double)
         Map 4 
             Map Operator Tree:
@@ -2403,16 +2419,16 @@ STAGE PLANS:
                 keys: KEY._col0 (type: int)
                 mode: mergepartial
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 13 Data size: 1352 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 121 Data size: 1283 Basic stats: COMPLETE Column stats: NONE
                 Select Operator
                   expressions: _col1 (type: double), _col0 (type: int)
                   outputColumnNames: _col0, _col1
-                  Statistics: Num rows: 13 Data size: 1352 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 121 Data size: 1283 Basic stats: COMPLETE Column stats: NONE
                   Reduce Output Operator
                     key expressions: _col1 (type: int)
                     sort order: +
                     Map-reduce partition columns: _col1 (type: int)
-                    Statistics: Num rows: 13 Data size: 1352 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 121 Data size: 1283 Basic stats: COMPLETE Column stats: NONE
                     value expressions: _col0 (type: double)
         Reducer 3 
             Reduce Operator Tree:
@@ -3494,25 +3510,25 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: srcbucket_mapjoin
-                  Statistics: Num rows: 27 Data size: 2808 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 242 Data size: 2566 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
                     predicate: key is not null (type: boolean)
-                    Statistics: Num rows: 27 Data size: 2808 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 242 Data size: 2566 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: int), substr(value, 5) (type: string)
                       outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 27 Data size: 2808 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 242 Data size: 2566 Basic stats: COMPLETE Column stats: NONE
                       Group By Operator
                         aggregations: sum(_col1)
                         keys: _col0 (type: int)
                         mode: hash
                         outputColumnNames: _col0, _col1
-                        Statistics: Num rows: 27 Data size: 2808 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 242 Data size: 2566 Basic stats: COMPLETE Column stats: NONE
                         Reduce Output Operator
                           key expressions: _col0 (type: int)
                           sort order: +
                           Map-reduce partition columns: _col0 (type: int)
-                          Statistics: Num rows: 27 Data size: 2808 Basic stats: COMPLETE Column stats: NONE
+                          Statistics: Num rows: 242 Data size: 2566 Basic stats: COMPLETE Column stats: NONE
                           value expressions: _col1 (type: double)
         Map 4 
             Map Operator Tree:
@@ -3539,12 +3555,12 @@ STAGE PLANS:
                 keys: KEY._col0 (type: int)
                 mode: mergepartial
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 13 Data size: 1352 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 121 Data size: 1283 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
                   key expressions: _col0 (type: int)
                   sort order: +
                   Map-reduce partition columns: _col0 (type: int)
-                  Statistics: Num rows: 13 Data size: 1352 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 121 Data size: 1283 Basic stats: COMPLETE Column stats: NONE
                   value expressions: _col1 (type: double)
         Reducer 3 
             Reduce Operator Tree:
@@ -3600,25 +3616,25 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: srcbucket_mapjoin
-                  Statistics: Num rows: 27 Data size: 2808 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 242 Data size: 2566 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
                     predicate: key is not null (type: boolean)
-                    Statistics: Num rows: 27 Data size: 2808 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 242 Data size: 2566 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: int), substr(value, 5) (type: string)
                       outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 27 Data size: 2808 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 242 Data size: 2566 Basic stats: COMPLETE Column stats: NONE
                       Group By Operator
                         aggregations: sum(_col1)
                         keys: _col0 (type: int)
                         mode: hash
                         outputColumnNames: _col0, _col1
-                        Statistics: Num rows: 27 Data size: 2808 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 242 Data size: 2566 Basic stats: COMPLETE Column stats: NONE
                         Reduce Output Operator
                           key expressions: _col0 (type: int)
                           sort order: +
                           Map-reduce partition columns: _col0 (type: int)
-                          Statistics: Num rows: 27 Data size: 2808 Basic stats: COMPLETE Column stats: NONE
+                          Statistics: Num rows: 242 Data size: 2566 Basic stats: COMPLETE Column stats: NONE
                           value expressions: _col1 (type: double)
         Map 4 
             Map Operator Tree:
@@ -3645,12 +3661,12 @@ STAGE PLANS:
                 keys: KEY._col0 (type: int)
                 mode: mergepartial
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 13 Data size: 1352 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 121 Data size: 1283 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
                   key expressions: _col0 (type: int)
                   sort order: +
                   Map-reduce partition columns: _col0 (type: int)
-                  Statistics: Num rows: 13 Data size: 1352 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 121 Data size: 1283 Basic stats: COMPLETE Column stats: NONE
                   value expressions: _col1 (type: double)
         Reducer 3 
             Reduce Operator Tree:
@@ -3706,19 +3722,19 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: srcbucket_mapjoin
-                  Statistics: Num rows: 27 Data size: 2808 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 242 Data size: 2566 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
                     predicate: key is not null (type: boolean)
-                    Statistics: Num rows: 27 Data size: 2808 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 242 Data size: 2566 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: int), substr(value, 5) (type: string)
                       outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 27 Data size: 2808 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 242 Data size: 2566 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col0 (type: int)
                         sort order: +
                         Map-reduce partition columns: _col0 (type: int)
-                        Statistics: Num rows: 27 Data size: 2808 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 242 Data size: 2566 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col1 (type: string)
         Map 4 
             Map Operator Tree:
@@ -3745,12 +3761,12 @@ STAGE PLANS:
                 keys: KEY._col0 (type: int)
                 mode: complete
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 13 Data size: 1352 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 121 Data size: 1283 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
                   key expressions: _col0 (type: int)
                   sort order: +
                   Map-reduce partition columns: _col0 (type: int)
-                  Statistics: Num rows: 13 Data size: 1352 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 121 Data size: 1283 Basic stats: COMPLETE Column stats: NONE
                   value expressions: _col1 (type: double)
         Reducer 3 
             Reduce Operator Tree:
@@ -3806,19 +3822,19 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: srcbucket_mapjoin
-                  Statistics: Num rows: 27 Data size: 2808 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 242 Data size: 2566 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
                     predicate: key is not null (type: boolean)
-                    Statistics: Num rows: 27 Data size: 2808 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 242 Data size: 2566 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: int), substr(value, 5) (type: string)
                       outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 27 Data size: 2808 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 242 Data size: 2566 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col0 (type: int)
                         sort order: +
                         Map-reduce partition columns: _col0 (type: int)
-                        Statistics: Num rows: 27 Data size: 2808 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 242 Data size: 2566 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col1 (type: string)
         Map 4 
             Map Operator Tree:
@@ -3845,12 +3861,12 @@ STAGE PLANS:
                 keys: KEY._col0 (type: int)
                 mode: complete
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 13 Data size: 1352 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 121 Data size: 1283 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
                   key expressions: _col0 (type: int)
                   sort order: +
                   Map-reduce partition columns: _col0 (type: int)
-                  Statistics: Num rows: 13 Data size: 1352 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 121 Data size: 1283 Basic stats: COMPLETE Column stats: NONE
                   value expressions: _col1 (type: double)
         Reducer 3 
             Reduce Operator Tree:
@@ -5800,11 +5816,15 @@ PREHOOK: query: analyze table tab2 compute statistics for columns
 PREHOOK: type: QUERY
 PREHOOK: Input: default@tab2
 PREHOOK: Input: default@tab2@ds=2008-04-08
+PREHOOK: Output: default@tab2
+PREHOOK: Output: default@tab2@ds=2008-04-08
 #### A masked pattern was here ####
 POSTHOOK: query: analyze table tab2 compute statistics for columns
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@tab2
 POSTHOOK: Input: default@tab2@ds=2008-04-08
+POSTHOOK: Output: default@tab2
+POSTHOOK: Output: default@tab2@ds=2008-04-08
 #### A masked pattern was here ####
 PREHOOK: query: explain select a.key, a.value, b.value
         from tab2 a join tab_part b on a.key = b.key and a.value = b.value

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/bucket_map_join_tez2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/bucket_map_join_tez2.q.out b/ql/src/test/results/clientpositive/spark/bucket_map_join_tez2.q.out
index bd0f938..3ce67e2 100644
--- a/ql/src/test/results/clientpositive/spark/bucket_map_join_tez2.q.out
+++ b/ql/src/test/results/clientpositive/spark/bucket_map_join_tez2.q.out
@@ -112,41 +112,57 @@ PREHOOK: query: analyze table srcbucket_mapjoin compute statistics for columns
 PREHOOK: type: QUERY
 PREHOOK: Input: default@srcbucket_mapjoin
 PREHOOK: Input: default@srcbucket_mapjoin@ds=2008-04-08
+PREHOOK: Output: default@srcbucket_mapjoin
+PREHOOK: Output: default@srcbucket_mapjoin@ds=2008-04-08
 #### A masked pattern was here ####
 POSTHOOK: query: analyze table srcbucket_mapjoin compute statistics for columns
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@srcbucket_mapjoin
 POSTHOOK: Input: default@srcbucket_mapjoin@ds=2008-04-08
+POSTHOOK: Output: default@srcbucket_mapjoin
+POSTHOOK: Output: default@srcbucket_mapjoin@ds=2008-04-08
 #### A masked pattern was here ####
 PREHOOK: query: analyze table srcbucket_mapjoin_part compute statistics for columns
 PREHOOK: type: QUERY
 PREHOOK: Input: default@srcbucket_mapjoin_part
 PREHOOK: Input: default@srcbucket_mapjoin_part@ds=2008-04-08
+PREHOOK: Output: default@srcbucket_mapjoin_part
+PREHOOK: Output: default@srcbucket_mapjoin_part@ds=2008-04-08
 #### A masked pattern was here ####
 POSTHOOK: query: analyze table srcbucket_mapjoin_part compute statistics for columns
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@srcbucket_mapjoin_part
 POSTHOOK: Input: default@srcbucket_mapjoin_part@ds=2008-04-08
+POSTHOOK: Output: default@srcbucket_mapjoin_part
+POSTHOOK: Output: default@srcbucket_mapjoin_part@ds=2008-04-08
 #### A masked pattern was here ####
 PREHOOK: query: analyze table tab compute statistics for columns
 PREHOOK: type: QUERY
 PREHOOK: Input: default@tab
 PREHOOK: Input: default@tab@ds=2008-04-08
+PREHOOK: Output: default@tab
+PREHOOK: Output: default@tab@ds=2008-04-08
 #### A masked pattern was here ####
 POSTHOOK: query: analyze table tab compute statistics for columns
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@tab
 POSTHOOK: Input: default@tab@ds=2008-04-08
+POSTHOOK: Output: default@tab
+POSTHOOK: Output: default@tab@ds=2008-04-08
 #### A masked pattern was here ####
 PREHOOK: query: analyze table tab_part compute statistics for columns
 PREHOOK: type: QUERY
 PREHOOK: Input: default@tab_part
 PREHOOK: Input: default@tab_part@ds=2008-04-08
+PREHOOK: Output: default@tab_part
+PREHOOK: Output: default@tab_part@ds=2008-04-08
 #### A masked pattern was here ####
 POSTHOOK: query: analyze table tab_part compute statistics for columns
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@tab_part
 POSTHOOK: Input: default@tab_part@ds=2008-04-08
+POSTHOOK: Output: default@tab_part
+POSTHOOK: Output: default@tab_part@ds=2008-04-08
 #### A masked pattern was here ####
 PREHOOK: query: explain select a.key, b.key from tab_part a join tab_part c on a.key = c.key join tab_part b on a.value = b.value
 PREHOOK: type: QUERY
@@ -399,10 +415,12 @@ POSTHOOK: Lineage: tab1.value SIMPLE [(srcbucket_mapjoin)srcbucket_mapjoin.Field
 PREHOOK: query: analyze table tab1 compute statistics for columns
 PREHOOK: type: QUERY
 PREHOOK: Input: default@tab1
+PREHOOK: Output: default@tab1
 #### A masked pattern was here ####
 POSTHOOK: query: analyze table tab1 compute statistics for columns
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@tab1
+POSTHOOK: Output: default@tab1
 #### A masked pattern was here ####
 PREHOOK: query: explain
 select a.key, a.value, b.value
@@ -1406,11 +1424,15 @@ PREHOOK: query: analyze table tab_part1 compute statistics for columns
 PREHOOK: type: QUERY
 PREHOOK: Input: default@tab_part1
 PREHOOK: Input: default@tab_part1@ds=2008-04-08
+PREHOOK: Output: default@tab_part1
+PREHOOK: Output: default@tab_part1@ds=2008-04-08
 #### A masked pattern was here ####
 POSTHOOK: query: analyze table tab_part1 compute statistics for columns
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@tab_part1
 POSTHOOK: Input: default@tab_part1@ds=2008-04-08
+POSTHOOK: Output: default@tab_part1
+POSTHOOK: Output: default@tab_part1@ds=2008-04-08
 #### A masked pattern was here ####
 PREHOOK: query: explain
 select count(*)

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/bucketmapjoin1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/bucketmapjoin1.q.out b/ql/src/test/results/clientpositive/spark/bucketmapjoin1.q.out
index 0b1ee92..d6e45d5 100644
--- a/ql/src/test/results/clientpositive/spark/bucketmapjoin1.q.out
+++ b/ql/src/test/results/clientpositive/spark/bucketmapjoin1.q.out
@@ -613,7 +613,8 @@ STAGE PLANS:
               name: default.bucketmapjoin_tmp_result
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
 PREHOOK: query: insert overwrite table bucketmapjoin_tmp_result 
@@ -968,7 +969,8 @@ STAGE PLANS:
               name: default.bucketmapjoin_tmp_result
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
 PREHOOK: query: insert overwrite table bucketmapjoin_tmp_result 

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/bucketmapjoin2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/bucketmapjoin2.q.out b/ql/src/test/results/clientpositive/spark/bucketmapjoin2.q.out
index 49d7cc9..e801a80 100644
--- a/ql/src/test/results/clientpositive/spark/bucketmapjoin2.q.out
+++ b/ql/src/test/results/clientpositive/spark/bucketmapjoin2.q.out
@@ -341,7 +341,8 @@ STAGE PLANS:
               name: default.bucketmapjoin_tmp_result
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
 PREHOOK: query: insert overwrite table bucketmapjoin_tmp_result 
@@ -702,7 +703,8 @@ STAGE PLANS:
               name: default.bucketmapjoin_tmp_result
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
 PREHOOK: query: insert overwrite table bucketmapjoin_tmp_result 
@@ -1129,7 +1131,8 @@ STAGE PLANS:
               name: default.bucketmapjoin_tmp_result
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
 PREHOOK: query: insert overwrite table bucketmapjoin_tmp_result

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/bucketmapjoin3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/bucketmapjoin3.q.out b/ql/src/test/results/clientpositive/spark/bucketmapjoin3.q.out
index 1db5b2c..d10a30c 100644
--- a/ql/src/test/results/clientpositive/spark/bucketmapjoin3.q.out
+++ b/ql/src/test/results/clientpositive/spark/bucketmapjoin3.q.out
@@ -365,7 +365,8 @@ STAGE PLANS:
               name: default.bucketmapjoin_tmp_result
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
 PREHOOK: query: insert overwrite table bucketmapjoin_tmp_result 
@@ -726,7 +727,8 @@ STAGE PLANS:
               name: default.bucketmapjoin_tmp_result
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
 PREHOOK: query: insert overwrite table bucketmapjoin_tmp_result 

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/bucketmapjoin4.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/bucketmapjoin4.q.out b/ql/src/test/results/clientpositive/spark/bucketmapjoin4.q.out
index 563e816..0c6c2c7 100644
--- a/ql/src/test/results/clientpositive/spark/bucketmapjoin4.q.out
+++ b/ql/src/test/results/clientpositive/spark/bucketmapjoin4.q.out
@@ -361,7 +361,8 @@ STAGE PLANS:
               name: default.bucketmapjoin_tmp_result
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
 PREHOOK: query: insert overwrite table bucketmapjoin_tmp_result
@@ -706,7 +707,8 @@ STAGE PLANS:
               name: default.bucketmapjoin_tmp_result
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
 PREHOOK: query: insert overwrite table bucketmapjoin_tmp_result

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/bucketmapjoin5.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/bucketmapjoin5.q.out b/ql/src/test/results/clientpositive/spark/bucketmapjoin5.q.out
index 5e57618..f7344de 100644
--- a/ql/src/test/results/clientpositive/spark/bucketmapjoin5.q.out
+++ b/ql/src/test/results/clientpositive/spark/bucketmapjoin5.q.out
@@ -460,7 +460,8 @@ STAGE PLANS:
               name: default.bucketmapjoin_tmp_result
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
 PREHOOK: query: insert overwrite table bucketmapjoin_tmp_result 
@@ -866,7 +867,8 @@ STAGE PLANS:
               name: default.bucketmapjoin_tmp_result
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
 PREHOOK: query: insert overwrite table bucketmapjoin_tmp_result 

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/bucketmapjoin7.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/bucketmapjoin7.q.out b/ql/src/test/results/clientpositive/spark/bucketmapjoin7.q.out
index bcf5b79..0d7def7 100644
--- a/ql/src/test/results/clientpositive/spark/bucketmapjoin7.q.out
+++ b/ql/src/test/results/clientpositive/spark/bucketmapjoin7.q.out
@@ -117,10 +117,8 @@ STAGE PLANS:
 #### A masked pattern was here ####
                     name default.srcbucket_mapjoin_part_2
                     numFiles 2
-                    numRows 0
                     partition_columns ds/hr
                     partition_columns.types string:string
-                    rawDataSize 0
                     serialization.ddl struct srcbucket_mapjoin_part_2 { i32 key, string value}
                     serialization.format 1
                     serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -220,10 +218,8 @@ STAGE PLANS:
 #### A masked pattern was here ####
                     name default.srcbucket_mapjoin_part_1
                     numFiles 2
-                    numRows 0
                     partition_columns ds/hr
                     partition_columns.types string:string
-                    rawDataSize 0
                     serialization.ddl struct srcbucket_mapjoin_part_1 { i32 key, string value}
                     serialization.format 1
                     serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/bucketmapjoin_negative.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/bucketmapjoin_negative.q.out b/ql/src/test/results/clientpositive/spark/bucketmapjoin_negative.q.out
index 2bfb4af..dfba4ef 100644
--- a/ql/src/test/results/clientpositive/spark/bucketmapjoin_negative.q.out
+++ b/ql/src/test/results/clientpositive/spark/bucketmapjoin_negative.q.out
@@ -301,6 +301,7 @@ STAGE PLANS:
               name: default.bucketmapjoin_tmp_result
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/bucketmapjoin_negative2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/bucketmapjoin_negative2.q.out b/ql/src/test/results/clientpositive/spark/bucketmapjoin_negative2.q.out
index 277eb7f..0504a43 100644
--- a/ql/src/test/results/clientpositive/spark/bucketmapjoin_negative2.q.out
+++ b/ql/src/test/results/clientpositive/spark/bucketmapjoin_negative2.q.out
@@ -370,6 +370,7 @@ STAGE PLANS:
               name: default.bucketmapjoin_tmp_result
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/bucketsortoptimize_insert_2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/bucketsortoptimize_insert_2.q.out b/ql/src/test/results/clientpositive/spark/bucketsortoptimize_insert_2.q.out
index 81a064b..814553d 100644
--- a/ql/src/test/results/clientpositive/spark/bucketsortoptimize_insert_2.q.out
+++ b/ql/src/test/results/clientpositive/spark/bucketsortoptimize_insert_2.q.out
@@ -186,7 +186,8 @@ STAGE PLANS:
               name: default.test_table3
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1')
 SELECT a.key, concat(a.value, b.value) 
@@ -362,7 +363,8 @@ STAGE PLANS:
               name: default.test_table3
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1')
 SELECT a.key, concat(a.value, b.value) 
@@ -562,7 +564,8 @@ STAGE PLANS:
               name: default.test_table3
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1')
 SELECT a.key, concat(a.value, b.value) 
@@ -768,7 +771,8 @@ STAGE PLANS:
               name: default.test_table3
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1')
 SELECT a.key, concat(a.value, b.value) 
@@ -956,7 +960,8 @@ STAGE PLANS:
               name: default.test_table3
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1')
 SELECT a.key, concat(a.v1, b.v2) 
@@ -1144,7 +1149,8 @@ STAGE PLANS:
               name: default.test_table3
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1')
 SELECT a.key+a.key, concat(a.value, b.value) 

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/bucketsortoptimize_insert_4.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/bucketsortoptimize_insert_4.q.out b/ql/src/test/results/clientpositive/spark/bucketsortoptimize_insert_4.q.out
index 4245aa1..faada6f 100644
--- a/ql/src/test/results/clientpositive/spark/bucketsortoptimize_insert_4.q.out
+++ b/ql/src/test/results/clientpositive/spark/bucketsortoptimize_insert_4.q.out
@@ -154,7 +154,8 @@ STAGE PLANS:
               name: default.test_table3
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1')
 SELECT a.key, a.key, concat(a.value, b.value) 
@@ -345,7 +346,8 @@ STAGE PLANS:
               name: default.test_table3
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1')
 SELECT a.key, a.value

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/bucketsortoptimize_insert_6.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/bucketsortoptimize_insert_6.q.out b/ql/src/test/results/clientpositive/spark/bucketsortoptimize_insert_6.q.out
index 5e4e5ef..8282a5a 100644
--- a/ql/src/test/results/clientpositive/spark/bucketsortoptimize_insert_6.q.out
+++ b/ql/src/test/results/clientpositive/spark/bucketsortoptimize_insert_6.q.out
@@ -164,7 +164,8 @@ STAGE PLANS:
               name: default.test_table3
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1')
 SELECT a.key, a.key2, concat(a.value, b.value) 
@@ -347,7 +348,8 @@ STAGE PLANS:
               name: default.test_table3
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1')
 SELECT subq1.key, subq1.key2, subq1.value from
@@ -530,7 +532,8 @@ STAGE PLANS:
               name: default.test_table3
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: EXPLAIN
 INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1')
@@ -648,7 +651,8 @@ STAGE PLANS:
               name: default.test_table3
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: EXPLAIN
 INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1')
@@ -772,7 +776,8 @@ STAGE PLANS:
               name: default.test_table3
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1')
 SELECT subq2.key, subq2.key2, subq2.value from
@@ -973,7 +978,8 @@ STAGE PLANS:
               name: default.test_table3
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1')
 SELECT subq2.k2, subq2.k1, subq2.value from
@@ -1184,5 +1190,6 @@ STAGE PLANS:
               name: default.test_table4
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/bucketsortoptimize_insert_7.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/bucketsortoptimize_insert_7.q.out b/ql/src/test/results/clientpositive/spark/bucketsortoptimize_insert_7.q.out
index 52bf6ee..c9b03a1 100644
--- a/ql/src/test/results/clientpositive/spark/bucketsortoptimize_insert_7.q.out
+++ b/ql/src/test/results/clientpositive/spark/bucketsortoptimize_insert_7.q.out
@@ -164,7 +164,8 @@ STAGE PLANS:
               name: default.test_table3
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1')
 SELECT a.key, concat(a.value, b.value) 
@@ -344,7 +345,8 @@ STAGE PLANS:
               name: default.test_table3
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1')
 SELECT a.key, concat(a.value, b.value) 
@@ -530,7 +532,8 @@ STAGE PLANS:
               name: default.test_table3
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1')
 SELECT a.key, concat(a.value, b.value) 

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/bucketsortoptimize_insert_8.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/bucketsortoptimize_insert_8.q.out b/ql/src/test/results/clientpositive/spark/bucketsortoptimize_insert_8.q.out
index 8263b6f..2198dc2 100644
--- a/ql/src/test/results/clientpositive/spark/bucketsortoptimize_insert_8.q.out
+++ b/ql/src/test/results/clientpositive/spark/bucketsortoptimize_insert_8.q.out
@@ -162,7 +162,8 @@ STAGE PLANS:
               name: default.test_table3
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1')
 SELECT a.key, b.key, concat(a.value, b.value) 
@@ -339,7 +340,8 @@ STAGE PLANS:
               name: default.test_table3
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1')
 SELECT b.key, a.key, concat(a.value, b.value) 

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/ctas.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/ctas.q.out b/ql/src/test/results/clientpositive/spark/ctas.q.out
index d958a32..2ccd1dd 100644
--- a/ql/src/test/results/clientpositive/spark/ctas.q.out
+++ b/ql/src/test/results/clientpositive/spark/ctas.q.out
@@ -94,7 +94,8 @@ STAGE PLANS:
           name: default.nzhang_CTAS1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: create table nzhang_CTAS1 as select key k, value from src sort by k, value limit 10
 PREHOOK: type: CREATETABLE_AS_SELECT
@@ -240,7 +241,8 @@ STAGE PLANS:
           name: default.nzhang_ctas2
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: create table nzhang_ctas2 as select * from src sort by key, value limit 10
 PREHOOK: type: CREATETABLE_AS_SELECT
@@ -386,7 +388,8 @@ STAGE PLANS:
           name: default.nzhang_ctas3
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: create table nzhang_ctas3 row format serde "org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe" stored as RCFile as select key/2 half_key, concat(value, "_con") conb  from src sort by half_key, conb limit 10
 PREHOOK: type: CREATETABLE_AS_SELECT
@@ -596,7 +599,8 @@ STAGE PLANS:
           name: default.nzhang_ctas4
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: create table nzhang_ctas4 row format delimited fields terminated by ',' stored as textfile as select key, value from src sort by key, value limit 10
 PREHOOK: type: CREATETABLE_AS_SELECT
@@ -746,7 +750,8 @@ STAGE PLANS:
           name: default.nzhang_ctas5
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: create table nzhang_ctas5 row format delimited fields terminated by ',' lines terminated by '\012' stored as textfile as select key, value from src sort by key, value limit 10
 PREHOOK: type: CREATETABLE_AS_SELECT

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/disable_merge_for_bucketing.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/disable_merge_for_bucketing.q.out b/ql/src/test/results/clientpositive/spark/disable_merge_for_bucketing.q.out
index 8693026..86d6e1a 100644
--- a/ql/src/test/results/clientpositive/spark/disable_merge_for_bucketing.q.out
+++ b/ql/src/test/results/clientpositive/spark/disable_merge_for_bucketing.q.out
@@ -166,7 +166,8 @@ STAGE PLANS:
               name: default.bucket2_1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
 PREHOOK: query: insert overwrite table bucket2_1

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/dynamic_rdd_cache.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/dynamic_rdd_cache.q.out b/ql/src/test/results/clientpositive/spark/dynamic_rdd_cache.q.out
index 4f4dddd..0379c9c 100644
--- a/ql/src/test/results/clientpositive/spark/dynamic_rdd_cache.q.out
+++ b/ql/src/test/results/clientpositive/spark/dynamic_rdd_cache.q.out
@@ -239,7 +239,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-1
     Move Operator
@@ -252,7 +253,8 @@ STAGE PLANS:
               name: default.dest2
 
   Stage: Stage-4
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: SELECT dest1.* FROM dest1
 PREHOOK: type: QUERY
@@ -453,7 +455,8 @@ STAGE PLANS:
               name: default.tmptable
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: SELECT * FROM tmptable x SORT BY x.key
 PREHOOK: type: QUERY

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/groupby1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/groupby1.q.out b/ql/src/test/results/clientpositive/spark/groupby1.q.out
index 42ce243..68cb6d7 100644
--- a/ql/src/test/results/clientpositive/spark/groupby1.q.out
+++ b/ql/src/test/results/clientpositive/spark/groupby1.q.out
@@ -86,7 +86,8 @@ STAGE PLANS:
               name: default.dest_g1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM src INSERT OVERWRITE TABLE dest_g1 SELECT src.key, sum(substr(src.value,5)) GROUP BY src.key
 PREHOOK: type: QUERY

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/groupby10.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/groupby10.q.out b/ql/src/test/results/clientpositive/spark/groupby10.q.out
index b7d19fe..f03e4e3 100644
--- a/ql/src/test/results/clientpositive/spark/groupby10.q.out
+++ b/ql/src/test/results/clientpositive/spark/groupby10.q.out
@@ -151,7 +151,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-1
     Move Operator
@@ -164,7 +165,8 @@ STAGE PLANS:
               name: default.dest2
 
   Stage: Stage-4
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM INPUT
 INSERT OVERWRITE TABLE dest1 SELECT INPUT.key, count(substr(INPUT.value,5)), count(distinct substr(INPUT.value,5)) GROUP BY INPUT.key
@@ -363,7 +365,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-1
     Move Operator
@@ -376,7 +379,8 @@ STAGE PLANS:
               name: default.dest2
 
   Stage: Stage-4
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM INPUT
 INSERT OVERWRITE TABLE dest1 SELECT INPUT.key, count(substr(INPUT.value,5)), count(distinct substr(INPUT.value,5)) GROUP BY INPUT.key
@@ -544,7 +548,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-1
     Move Operator
@@ -557,7 +562,8 @@ STAGE PLANS:
               name: default.dest2
 
   Stage: Stage-4
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM INPUT
 INSERT OVERWRITE TABLE dest1 SELECT INPUT.key, sum(distinct substr(INPUT.value,5)), count(distinct substr(INPUT.value,5)) GROUP BY INPUT.key

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/groupby11.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/groupby11.q.out b/ql/src/test/results/clientpositive/spark/groupby11.q.out
index a0f99c4..94ab3e0 100644
--- a/ql/src/test/results/clientpositive/spark/groupby11.q.out
+++ b/ql/src/test/results/clientpositive/spark/groupby11.q.out
@@ -155,7 +155,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-1
     Move Operator
@@ -170,7 +171,8 @@ STAGE PLANS:
               name: default.dest2
 
   Stage: Stage-4
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM src
 INSERT OVERWRITE TABLE dest1 partition(ds='111')

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/groupby1_map.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/groupby1_map.q.out b/ql/src/test/results/clientpositive/spark/groupby1_map.q.out
index b414aa6..775ca8a 100644
--- a/ql/src/test/results/clientpositive/spark/groupby1_map.q.out
+++ b/ql/src/test/results/clientpositive/spark/groupby1_map.q.out
@@ -77,7 +77,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM src INSERT OVERWRITE TABLE dest1 SELECT src.key, sum(substr(src.value,5)) GROUP BY src.key
 PREHOOK: type: QUERY

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/groupby1_map_nomap.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/groupby1_map_nomap.q.out b/ql/src/test/results/clientpositive/spark/groupby1_map_nomap.q.out
index a01cee1..20e9c65 100644
--- a/ql/src/test/results/clientpositive/spark/groupby1_map_nomap.q.out
+++ b/ql/src/test/results/clientpositive/spark/groupby1_map_nomap.q.out
@@ -77,7 +77,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM src INSERT OVERWRITE TABLE dest1 SELECT src.key, sum(substr(src.value,5)) GROUP BY src.key
 PREHOOK: type: QUERY

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/groupby1_map_skew.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/groupby1_map_skew.q.out b/ql/src/test/results/clientpositive/spark/groupby1_map_skew.q.out
index f7b7f7a..8c3f571 100644
--- a/ql/src/test/results/clientpositive/spark/groupby1_map_skew.q.out
+++ b/ql/src/test/results/clientpositive/spark/groupby1_map_skew.q.out
@@ -92,7 +92,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM src INSERT OVERWRITE TABLE dest1 SELECT src.key, sum(substr(src.value,5)) GROUP BY src.key
 PREHOOK: type: QUERY

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/groupby1_noskew.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/groupby1_noskew.q.out b/ql/src/test/results/clientpositive/spark/groupby1_noskew.q.out
index 1b7e53b..ea607cd 100644
--- a/ql/src/test/results/clientpositive/spark/groupby1_noskew.q.out
+++ b/ql/src/test/results/clientpositive/spark/groupby1_noskew.q.out
@@ -71,7 +71,8 @@ STAGE PLANS:
               name: default.dest_g1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM src INSERT OVERWRITE TABLE dest_g1 SELECT src.key, sum(substr(src.value,5)) GROUP BY src.key
 PREHOOK: type: QUERY

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/groupby2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/groupby2.q.out b/ql/src/test/results/clientpositive/spark/groupby2.q.out
index a5cd0e6..966c273 100644
--- a/ql/src/test/results/clientpositive/spark/groupby2.q.out
+++ b/ql/src/test/results/clientpositive/spark/groupby2.q.out
@@ -87,7 +87,8 @@ STAGE PLANS:
               name: default.dest_g2
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM src
 INSERT OVERWRITE TABLE dest_g2 SELECT substr(src.key,1,1), count(DISTINCT substr(src.value,5)), concat(substr(src.key,1,1),sum(substr(src.value,5))) GROUP BY substr(src.key,1,1)

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/groupby2_map.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/groupby2_map.q.out b/ql/src/test/results/clientpositive/spark/groupby2_map.q.out
index d2b69af..c716bbd 100644
--- a/ql/src/test/results/clientpositive/spark/groupby2_map.q.out
+++ b/ql/src/test/results/clientpositive/spark/groupby2_map.q.out
@@ -79,7 +79,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM src
 INSERT OVERWRITE TABLE dest1 SELECT substr(src.key,1,1), count(DISTINCT substr(src.value,5)), concat(substr(src.key,1,1),sum(substr(src.value,5))) GROUP BY substr(src.key,1,1)

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/groupby2_map_multi_distinct.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/groupby2_map_multi_distinct.q.out b/ql/src/test/results/clientpositive/spark/groupby2_map_multi_distinct.q.out
index 4ad0568..faf69b6 100644
--- a/ql/src/test/results/clientpositive/spark/groupby2_map_multi_distinct.q.out
+++ b/ql/src/test/results/clientpositive/spark/groupby2_map_multi_distinct.q.out
@@ -79,7 +79,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM src
 INSERT OVERWRITE TABLE dest1 SELECT substr(src.key,1,1), count(DISTINCT substr(src.value,5)), concat(substr(src.key,1,1),sum(substr(src.value,5))), sum(DISTINCT substr(src.value, 5)), count(src.value) GROUP BY substr(src.key,1,1)
@@ -187,7 +188,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM src
 INSERT OVERWRITE TABLE dest1 SELECT substr(src.key,1,1), count(DISTINCT substr(src.key,1,1)), concat(substr(src.key,1,1),sum(substr(src.value,5))), sum(DISTINCT substr(src.value, 5)), count(src.value) GROUP BY substr(src.key,1,1)

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/groupby2_map_skew.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/groupby2_map_skew.q.out b/ql/src/test/results/clientpositive/spark/groupby2_map_skew.q.out
index f4a567e..e297424 100644
--- a/ql/src/test/results/clientpositive/spark/groupby2_map_skew.q.out
+++ b/ql/src/test/results/clientpositive/spark/groupby2_map_skew.q.out
@@ -94,7 +94,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM src
 INSERT OVERWRITE TABLE dest1 SELECT substr(src.key,1,1), count(DISTINCT substr(src.value,5)), concat(substr(src.key,1,1),sum(substr(src.value,5))) GROUP BY substr(src.key,1,1)

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/groupby2_noskew.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/groupby2_noskew.q.out b/ql/src/test/results/clientpositive/spark/groupby2_noskew.q.out
index 8ecf769..83e794a 100644
--- a/ql/src/test/results/clientpositive/spark/groupby2_noskew.q.out
+++ b/ql/src/test/results/clientpositive/spark/groupby2_noskew.q.out
@@ -72,7 +72,8 @@ STAGE PLANS:
               name: default.dest_g2
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM src
 INSERT OVERWRITE TABLE dest_g2 SELECT substr(src.key,1,1), count(DISTINCT substr(src.value,5)), concat(substr(src.key,1,1),sum(substr(src.value,5))) GROUP BY substr(src.key,1,1)

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/groupby2_noskew_multi_distinct.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/groupby2_noskew_multi_distinct.q.out b/ql/src/test/results/clientpositive/spark/groupby2_noskew_multi_distinct.q.out
index 3ede0fc..171932a 100644
--- a/ql/src/test/results/clientpositive/spark/groupby2_noskew_multi_distinct.q.out
+++ b/ql/src/test/results/clientpositive/spark/groupby2_noskew_multi_distinct.q.out
@@ -73,7 +73,8 @@ STAGE PLANS:
               name: default.dest_g2
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM src
 INSERT OVERWRITE TABLE dest_g2 SELECT substr(src.key,1,1), count(DISTINCT substr(src.value,5)), concat(substr(src.key,1,1),sum(substr(src.value,5))), sum(DISTINCT substr(src.value, 5)), count(src.value) GROUP BY substr(src.key,1,1)

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/groupby3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/groupby3.q.out b/ql/src/test/results/clientpositive/spark/groupby3.q.out
index a34e89e1..c314763 100644
--- a/ql/src/test/results/clientpositive/spark/groupby3.q.out
+++ b/ql/src/test/results/clientpositive/spark/groupby3.q.out
@@ -101,7 +101,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM src
 INSERT OVERWRITE TABLE dest1 SELECT 

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/groupby3_map.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/groupby3_map.q.out b/ql/src/test/results/clientpositive/spark/groupby3_map.q.out
index 71f8dc0..9caf47b 100644
--- a/ql/src/test/results/clientpositive/spark/groupby3_map.q.out
+++ b/ql/src/test/results/clientpositive/spark/groupby3_map.q.out
@@ -95,7 +95,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM src
 INSERT OVERWRITE TABLE dest1 SELECT

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/groupby3_map_multi_distinct.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/groupby3_map_multi_distinct.q.out b/ql/src/test/results/clientpositive/spark/groupby3_map_multi_distinct.q.out
index 47ef5cb..6782761 100644
--- a/ql/src/test/results/clientpositive/spark/groupby3_map_multi_distinct.q.out
+++ b/ql/src/test/results/clientpositive/spark/groupby3_map_multi_distinct.q.out
@@ -99,7 +99,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM src
 INSERT OVERWRITE TABLE dest1 SELECT

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/groupby3_map_skew.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/groupby3_map_skew.q.out b/ql/src/test/results/clientpositive/spark/groupby3_map_skew.q.out
index 7cfca81..2f10160 100644
--- a/ql/src/test/results/clientpositive/spark/groupby3_map_skew.q.out
+++ b/ql/src/test/results/clientpositive/spark/groupby3_map_skew.q.out
@@ -108,7 +108,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM src
 INSERT OVERWRITE TABLE dest1 SELECT

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/groupby3_noskew.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/groupby3_noskew.q.out b/ql/src/test/results/clientpositive/spark/groupby3_noskew.q.out
index b2993a6..d07eea5 100644
--- a/ql/src/test/results/clientpositive/spark/groupby3_noskew.q.out
+++ b/ql/src/test/results/clientpositive/spark/groupby3_noskew.q.out
@@ -88,7 +88,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM src
 INSERT OVERWRITE TABLE dest1 SELECT

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/groupby3_noskew_multi_distinct.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/groupby3_noskew_multi_distinct.q.out b/ql/src/test/results/clientpositive/spark/groupby3_noskew_multi_distinct.q.out
index d152a07..8bcb912 100644
--- a/ql/src/test/results/clientpositive/spark/groupby3_noskew_multi_distinct.q.out
+++ b/ql/src/test/results/clientpositive/spark/groupby3_noskew_multi_distinct.q.out
@@ -92,7 +92,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM src
 INSERT OVERWRITE TABLE dest1 SELECT

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/groupby4.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/groupby4.q.out b/ql/src/test/results/clientpositive/spark/groupby4.q.out
index 3ad01d0..1146c84 100644
--- a/ql/src/test/results/clientpositive/spark/groupby4.q.out
+++ b/ql/src/test/results/clientpositive/spark/groupby4.q.out
@@ -80,7 +80,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM src
 INSERT OVERWRITE TABLE dest1 SELECT substr(src.key,1,1) GROUP BY substr(src.key,1,1)

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/groupby4_map.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/groupby4_map.q.out b/ql/src/test/results/clientpositive/spark/groupby4_map.q.out
index 7cb3600..96f3488 100644
--- a/ql/src/test/results/clientpositive/spark/groupby4_map.q.out
+++ b/ql/src/test/results/clientpositive/spark/groupby4_map.q.out
@@ -71,7 +71,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM src INSERT OVERWRITE TABLE dest1 SELECT count(1)
 PREHOOK: type: QUERY

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/groupby4_map_skew.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/groupby4_map_skew.q.out b/ql/src/test/results/clientpositive/spark/groupby4_map_skew.q.out
index ef287ad..3bf8d99 100644
--- a/ql/src/test/results/clientpositive/spark/groupby4_map_skew.q.out
+++ b/ql/src/test/results/clientpositive/spark/groupby4_map_skew.q.out
@@ -71,7 +71,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM src INSERT OVERWRITE TABLE dest1 SELECT count(1)
 PREHOOK: type: QUERY


http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/ctas_colname.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/ctas_colname.q.out b/ql/src/test/results/clientpositive/ctas_colname.q.out
index 795517d..144811d 100644
--- a/ql/src/test/results/clientpositive/ctas_colname.q.out
+++ b/ql/src/test/results/clientpositive/ctas_colname.q.out
@@ -62,7 +62,8 @@ STAGE PLANS:
           name: default.summary
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: create table summary as select *, key + 1, concat(value, value) from src limit 20
 PREHOOK: type: CREATETABLE_AS_SELECT
@@ -221,7 +222,8 @@ STAGE PLANS:
           name: default.x4
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: create table x4 as select *, rank() over(partition by key order by value) as rr from src1
 PREHOOK: type: CREATETABLE_AS_SELECT
@@ -411,7 +413,8 @@ STAGE PLANS:
           name: default.x5
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: create table x5 as select *, lead(key,1) over(partition by key order by value) as lead1 from src limit 20
 PREHOOK: type: CREATETABLE_AS_SELECT
@@ -551,7 +554,8 @@ STAGE PLANS:
           name: default.x6
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-3
     Map Reduce
@@ -731,7 +735,8 @@ STAGE PLANS:
           name: default.x7
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: create table x7 as select * from (select *, count(value) from src group by key, value) a
 PREHOOK: type: CREATETABLE_AS_SELECT
@@ -1164,7 +1169,8 @@ STAGE PLANS:
           name: default.x8
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: create table x8 as select * from (select *, count(value) from src group by key, value having key < 9) a
 PREHOOK: type: CREATETABLE_AS_SELECT
@@ -1300,7 +1306,8 @@ STAGE PLANS:
           name: default.x9
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: create table x9 as select * from (select max(value),key from src group by key having key < 9 AND max(value) IS NOT NULL) a
 PREHOOK: type: CREATETABLE_AS_SELECT

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/ctas_uses_database_location.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/ctas_uses_database_location.q.out b/ql/src/test/results/clientpositive/ctas_uses_database_location.q.out
index fe5cf87..ad4402a 100644
--- a/ql/src/test/results/clientpositive/ctas_uses_database_location.q.out
+++ b/ql/src/test/results/clientpositive/ctas_uses_database_location.q.out
@@ -74,7 +74,8 @@ STAGE PLANS:
           name: db1.table_db1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-3
     Map Reduce

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/decimal_stats.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/decimal_stats.q.out b/ql/src/test/results/clientpositive/decimal_stats.q.out
index 9d7e750..b84171b 100644
--- a/ql/src/test/results/clientpositive/decimal_stats.q.out
+++ b/ql/src/test/results/clientpositive/decimal_stats.q.out
@@ -35,10 +35,12 @@ POSTHOOK: Lineage: decimal_1.v EXPRESSION []
 PREHOOK: query: analyze table decimal_1 compute statistics for columns
 PREHOOK: type: QUERY
 PREHOOK: Input: default@decimal_1
+PREHOOK: Output: default@decimal_1
 #### A masked pattern was here ####
 POSTHOOK: query: analyze table decimal_1 compute statistics for columns
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_1
+POSTHOOK: Output: default@decimal_1
 #### A masked pattern was here ####
 PREHOOK: query: desc formatted decimal_1 v
 PREHOOK: type: DESCTABLE

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/deleteAnalyze.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/deleteAnalyze.q.out b/ql/src/test/results/clientpositive/deleteAnalyze.q.out
index 840f053..1d051fd 100644
--- a/ql/src/test/results/clientpositive/deleteAnalyze.q.out
+++ b/ql/src/test/results/clientpositive/deleteAnalyze.q.out
@@ -87,18 +87,58 @@ COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\"}
 PREHOOK: query: analyze table testdeci2 compute statistics for columns
 PREHOOK: type: QUERY
 PREHOOK: Input: default@testdeci2
+PREHOOK: Output: default@testdeci2
 #### A masked pattern was here ####
 POSTHOOK: query: analyze table testdeci2 compute statistics for columns
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@testdeci2
+POSTHOOK: Output: default@testdeci2
+#### A masked pattern was here ####
+PREHOOK: query: describe formatted testdeci2
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@testdeci2
+POSTHOOK: query: describe formatted testdeci2
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@testdeci2
+# col_name            	data_type           	comment             
+id                  	int                 	                    
+amount              	decimal(10,3)       	                    
+sales_tax           	decimal(10,3)       	                    
+item                	string              	                    
+	 	 
+# Detailed Table Information	 	 
+Database:           	default             	 
 #### A masked pattern was here ####
+Retention:          	0                   	 
+#### A masked pattern was here ####
+Table Type:         	MANAGED_TABLE       	 
+Table Parameters:	 	 
+	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"amount\":\"true\",\"id\":\"true\",\"item\":\"true\",\"sales_tax\":\"true\"}}
+	numFiles            	0                   
+	numRows             	0                   
+	rawDataSize         	0                   
+	totalSize           	0                   
+#### A masked pattern was here ####
+	 	 
+# Storage Information	 	 
+SerDe Library:      	org.apache.hadoop.hive.ql.io.orc.OrcSerde	 
+InputFormat:        	org.apache.hadoop.hive.ql.io.orc.OrcInputFormat	 
+OutputFormat:       	org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat	 
+Compressed:         	No                  	 
+Num Buckets:        	-1                  	 
+Bucket Columns:     	[]                  	 
+Sort Columns:       	[]                  	 
+Storage Desc Params:	 	 
+	serialization.format	1                   
 PREHOOK: query: analyze table testdeci2 compute statistics for columns
 PREHOOK: type: QUERY
 PREHOOK: Input: default@testdeci2
+PREHOOK: Output: default@testdeci2
 #### A masked pattern was here ####
 POSTHOOK: query: analyze table testdeci2 compute statistics for columns
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@testdeci2
+POSTHOOK: Output: default@testdeci2
 #### A masked pattern was here ####
 PREHOOK: query: explain
 select s.id,
@@ -124,23 +164,23 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: s
-            Statistics: Num rows: 2 Data size: 176 Basic stats: COMPLETE Column stats: COMPLETE
+            Statistics: Num rows: 1 Data size: 88 Basic stats: COMPLETE Column stats: COMPLETE
             Filter Operator
               predicate: item is not null (type: boolean)
-              Statistics: Num rows: 2 Data size: 176 Basic stats: COMPLETE Column stats: COMPLETE
+              Statistics: Num rows: 1 Data size: 88 Basic stats: COMPLETE Column stats: COMPLETE
               Select Operator
                 expressions: id (type: int), item (type: string)
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 2 Data size: 176 Basic stats: COMPLETE Column stats: COMPLETE
+                Statistics: Num rows: 1 Data size: 88 Basic stats: COMPLETE Column stats: COMPLETE
                 Reduce Output Operator
                   key expressions: _col1 (type: string)
                   sort order: +
                   Map-reduce partition columns: _col1 (type: string)
-                  Statistics: Num rows: 2 Data size: 176 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 1 Data size: 88 Basic stats: COMPLETE Column stats: COMPLETE
                   value expressions: _col0 (type: int)
           TableScan
             alias: d
-            Statistics: Num rows: 2 Data size: 624 Basic stats: COMPLETE Column stats: COMPLETE
+            Statistics: Num rows: 1 Data size: 312 Basic stats: COMPLETE Column stats: COMPLETE
             Filter Operator
               predicate: ((id = 2) and item is not null) (type: boolean)
               Statistics: Num rows: 1 Data size: 312 Basic stats: COMPLETE Column stats: COMPLETE
@@ -162,14 +202,14 @@ STAGE PLANS:
             0 _col1 (type: string)
             1 _col3 (type: string)
           outputColumnNames: _col0, _col3, _col4
-          Statistics: Num rows: 2 Data size: 456 Basic stats: COMPLETE Column stats: COMPLETE
+          Statistics: Num rows: 1 Data size: 228 Basic stats: COMPLETE Column stats: COMPLETE
           Select Operator
             expressions: _col0 (type: int), COALESCE(_col3,0) (type: decimal(13,3)), COALESCE(_col4,0) (type: decimal(13,3))
             outputColumnNames: _col0, _col1, _col2
-            Statistics: Num rows: 2 Data size: 456 Basic stats: COMPLETE Column stats: COMPLETE
+            Statistics: Num rows: 1 Data size: 228 Basic stats: COMPLETE Column stats: COMPLETE
             File Output Operator
               compressed: false
-              Statistics: Num rows: 2 Data size: 456 Basic stats: COMPLETE Column stats: COMPLETE
+              Statistics: Num rows: 1 Data size: 228 Basic stats: COMPLETE Column stats: COMPLETE
               table:
                   input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/display_colstats_tbllvl.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/display_colstats_tbllvl.q.out b/ql/src/test/results/clientpositive/display_colstats_tbllvl.q.out
index 65af3f8..8e2e77b 100644
--- a/ql/src/test/results/clientpositive/display_colstats_tbllvl.q.out
+++ b/ql/src/test/results/clientpositive/display_colstats_tbllvl.q.out
@@ -108,7 +108,8 @@ STAGE PLANS:
                 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
 
   Stage: Stage-1
-    Column Stats Work
+    Stats Work
+      Basic Stats Work:
       Column Stats Desc:
           Columns: sourceIP, avgTimeOnSite, adRevenue
           Column Types: string, int, float
@@ -131,7 +132,8 @@ STAGE PLANS:
           TableScan
             alias: uservisits_web_text_none
             Statistics: Num rows: 1 Data size: 7060 Basic stats: COMPLETE Column stats: NONE
-            GatherStats: false
+            Statistics Aggregation Key Prefix: default.uservisits_web_text_none/
+            GatherStats: true
             Select Operator
               expressions: sourceip (type: string), adrevenue (type: float), avgtimeonsite (type: int)
               outputColumnNames: sourceip, adrevenue, avgtimeonsite
@@ -230,7 +232,9 @@ STAGE PLANS:
             MultiFileSpray: false
 
   Stage: Stage-1
-    Column Stats Work
+    Stats Work
+      Basic Stats Work:
+          Stats Aggregation Key Prefix: default.uservisits_web_text_none/
       Column Stats Desc:
           Columns: sourceIP, avgTimeOnSite, adRevenue
           Column Types: string, int, float
@@ -240,10 +244,12 @@ STAGE PLANS:
 PREHOOK: query: analyze table UserVisits_web_text_none compute statistics for columns sourceIP, avgTimeOnSite, adRevenue
 PREHOOK: type: QUERY
 PREHOOK: Input: default@uservisits_web_text_none
+PREHOOK: Output: default@uservisits_web_text_none
 #### A masked pattern was here ####
 POSTHOOK: query: analyze table UserVisits_web_text_none compute statistics for columns sourceIP, avgTimeOnSite, adRevenue
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@uservisits_web_text_none
+POSTHOOK: Output: default@uservisits_web_text_none
 #### A masked pattern was here ####
 PREHOOK: query: desc formatted UserVisits_web_text_none sourceIP
 PREHOOK: type: DESCTABLE
@@ -263,7 +269,7 @@ num_trues
 num_falses          	                    	 	 	 	 	 	 	 	 	 	 
 bitVector           	HL                  	 	 	 	 	 	 	 	 	 	 
 comment             	from deserializer   	 	 	 	 	 	 	 	 	 	 
-COLUMN_STATS_ACCURATE	{\"COLUMN_STATS\":{\"adrevenue\":\"true\",\"avgtimeonsite\":\"true\",\"sourceip\":\"true\"}}	 	 	 	 	 	 	 	 	 	 
+COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"adrevenue\":\"true\",\"avgtimeonsite\":\"true\",\"sourceip\":\"true\"}}	 	 	 	 	 	 	 	 	 	 
 PREHOOK: query: desc formatted UserVisits_web_text_none avgTimeOnSite
 PREHOOK: type: DESCTABLE
 PREHOOK: Input: default@uservisits_web_text_none
@@ -282,7 +288,7 @@ num_trues
 num_falses          	                    	 	 	 	 	 	 	 	 	 	 
 bitVector           	HL                  	 	 	 	 	 	 	 	 	 	 
 comment             	from deserializer   	 	 	 	 	 	 	 	 	 	 
-COLUMN_STATS_ACCURATE	{\"COLUMN_STATS\":{\"adrevenue\":\"true\",\"avgtimeonsite\":\"true\",\"sourceip\":\"true\"}}	 	 	 	 	 	 	 	 	 	 
+COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"adrevenue\":\"true\",\"avgtimeonsite\":\"true\",\"sourceip\":\"true\"}}	 	 	 	 	 	 	 	 	 	 
 PREHOOK: query: desc formatted UserVisits_web_text_none adRevenue
 PREHOOK: type: DESCTABLE
 PREHOOK: Input: default@uservisits_web_text_none
@@ -301,7 +307,7 @@ num_trues
 num_falses          	                    	 	 	 	 	 	 	 	 	 	 
 bitVector           	HL                  	 	 	 	 	 	 	 	 	 	 
 comment             	from deserializer   	 	 	 	 	 	 	 	 	 	 
-COLUMN_STATS_ACCURATE	{\"COLUMN_STATS\":{\"adrevenue\":\"true\",\"avgtimeonsite\":\"true\",\"sourceip\":\"true\"}}	 	 	 	 	 	 	 	 	 	 
+COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"adrevenue\":\"true\",\"avgtimeonsite\":\"true\",\"sourceip\":\"true\"}}	 	 	 	 	 	 	 	 	 	 
 PREHOOK: query: CREATE TABLE empty_tab(
    a int,
    b double,
@@ -386,7 +392,8 @@ STAGE PLANS:
                 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
 
   Stage: Stage-1
-    Column Stats Work
+    Stats Work
+      Basic Stats Work:
       Column Stats Desc:
           Columns: a, b, c, d, e
           Column Types: int, double, string, boolean, binary
@@ -395,10 +402,12 @@ STAGE PLANS:
 PREHOOK: query: analyze table empty_tab compute statistics for columns a,b,c,d,e
 PREHOOK: type: QUERY
 PREHOOK: Input: default@empty_tab
+PREHOOK: Output: default@empty_tab
 #### A masked pattern was here ####
 POSTHOOK: query: analyze table empty_tab compute statistics for columns a,b,c,d,e
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@empty_tab
+POSTHOOK: Output: default@empty_tab
 #### A masked pattern was here ####
 PREHOOK: query: desc formatted empty_tab a
 PREHOOK: type: DESCTABLE
@@ -507,7 +516,7 @@ POSTHOOK: query: desc extended default.UserVisits_web_text_none sourceIP
 POSTHOOK: type: DESCTABLE
 POSTHOOK: Input: default@uservisits_web_text_none
 sourceIP            	string              	from deserializer   
-COLUMN_STATS_ACCURATE	{\"COLUMN_STATS\":{\"adrevenue\":\"true\",\"avgtimeonsite\":\"true\",\"sourceip\":\"true\"}}	 
+COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"adrevenue\":\"true\",\"avgtimeonsite\":\"true\",\"sourceip\":\"true\"}}	 
 PREHOOK: query: desc formatted UserVisits_web_text_none sourceIP
 PREHOOK: type: DESCTABLE
 PREHOOK: Input: test@uservisits_web_text_none
@@ -562,15 +571,17 @@ num_trues
 num_falses          	                    	 	 	 	 	 	 	 	 	 	 
 bitVector           	HL                  	 	 	 	 	 	 	 	 	 	 
 comment             	from deserializer   	 	 	 	 	 	 	 	 	 	 
-COLUMN_STATS_ACCURATE	{\"COLUMN_STATS\":{\"adrevenue\":\"true\",\"avgtimeonsite\":\"true\",\"sourceip\":\"true\"}}	 	 	 	 	 	 	 	 	 	 
+COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"adrevenue\":\"true\",\"avgtimeonsite\":\"true\",\"sourceip\":\"true\"}}	 	 	 	 	 	 	 	 	 	 
 PREHOOK: query: analyze table UserVisits_web_text_none compute statistics for columns sKeyword
 PREHOOK: type: QUERY
 PREHOOK: Input: test@uservisits_web_text_none
 #### A masked pattern was here ####
+PREHOOK: Output: test@uservisits_web_text_none
 POSTHOOK: query: analyze table UserVisits_web_text_none compute statistics for columns sKeyword
 POSTHOOK: type: QUERY
 POSTHOOK: Input: test@uservisits_web_text_none
 #### A masked pattern was here ####
+POSTHOOK: Output: test@uservisits_web_text_none
 PREHOOK: query: desc extended UserVisits_web_text_none sKeyword
 PREHOOK: type: DESCTABLE
 PREHOOK: Input: test@uservisits_web_text_none
@@ -578,7 +589,7 @@ POSTHOOK: query: desc extended UserVisits_web_text_none sKeyword
 POSTHOOK: type: DESCTABLE
 POSTHOOK: Input: test@uservisits_web_text_none
 sKeyword            	string              	from deserializer   
-COLUMN_STATS_ACCURATE	{\"COLUMN_STATS\":{\"skeyword\":\"true\"}}	 
+COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"skeyword\":\"true\"}}	 
 PREHOOK: query: desc formatted UserVisits_web_text_none sKeyword
 PREHOOK: type: DESCTABLE
 PREHOOK: Input: test@uservisits_web_text_none
@@ -597,7 +608,7 @@ num_trues
 num_falses          	                    	 	 	 	 	 	 	 	 	 	 
 bitVector           	HL                  	 	 	 	 	 	 	 	 	 	 
 comment             	from deserializer   	 	 	 	 	 	 	 	 	 	 
-COLUMN_STATS_ACCURATE	{\"COLUMN_STATS\":{\"skeyword\":\"true\"}}	 	 	 	 	 	 	 	 	 	 
+COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"skeyword\":\"true\"}}	 	 	 	 	 	 	 	 	 	 
 PREHOOK: query: desc formatted test.UserVisits_web_text_none sKeyword
 PREHOOK: type: DESCTABLE
 PREHOOK: Input: test@uservisits_web_text_none
@@ -616,4 +627,4 @@ num_trues
 num_falses          	                    	 	 	 	 	 	 	 	 	 	 
 bitVector           	HL                  	 	 	 	 	 	 	 	 	 	 
 comment             	from deserializer   	 	 	 	 	 	 	 	 	 	 
-COLUMN_STATS_ACCURATE	{\"COLUMN_STATS\":{\"skeyword\":\"true\"}}	 	 	 	 	 	 	 	 	 	 
+COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"skeyword\":\"true\"}}	 	 	 	 	 	 	 	 	 	 

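The display_colstats_tbllvl.q.out hunks above show the heart of this patch: ANALYZE ... COMPUTE STATISTICS FOR COLUMNS now plans a single combined "Stats Work" stage (with a nested "Basic Stats Work:") instead of a standalone "Column Stats Work", gathers basic stats during the table scan (GatherStats: true plus a Statistics Aggregation Key Prefix), and registers the analyzed table as a write entity (the new PREHOOK/POSTHOOK Output lines). A minimal sketch of the user-visible effect, using a hypothetical table web_logs that is not part of this patch:

  CREATE TABLE web_logs (sourceIP STRING, avgTimeOnSite INT, adRevenue FLOAT);
  -- One command now refreshes basic and column statistics together:
  ANALYZE TABLE web_logs COMPUTE STATISTICS FOR COLUMNS sourceIP, avgTimeOnSite, adRevenue;
  -- Column-level DESC FORMATTED should then flag both kinds of stats as
  -- accurate, as in the golden files above:
  --   COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{...}}
  DESC FORMATTED web_logs sourceIP;
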
http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/distinct_stats.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/distinct_stats.q.out b/ql/src/test/results/clientpositive/distinct_stats.q.out
index 73b4add..bc2ab02 100644
--- a/ql/src/test/results/clientpositive/distinct_stats.q.out
+++ b/ql/src/test/results/clientpositive/distinct_stats.q.out
@@ -19,10 +19,12 @@ POSTHOOK: Lineage: t1.b SIMPLE [(src)src.FieldSchema(name:value, type:string, co
 PREHOOK: query: analyze table t1 compute statistics for columns a,b
 PREHOOK: type: QUERY
 PREHOOK: Input: default@t1
+PREHOOK: Output: default@t1
 #### A masked pattern was here ####
 POSTHOOK: query: analyze table t1 compute statistics for columns a,b
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@t1
+POSTHOOK: Output: default@t1
 #### A masked pattern was here ####
 PREHOOK: query: explain 
 select count(distinct b) from t1 group by a

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/drop_table_with_stats.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/drop_table_with_stats.q.out b/ql/src/test/results/clientpositive/drop_table_with_stats.q.out
index 52aa10a..119f74a 100644
--- a/ql/src/test/results/clientpositive/drop_table_with_stats.q.out
+++ b/ql/src/test/results/clientpositive/drop_table_with_stats.q.out
@@ -30,10 +30,12 @@ PREHOOK: query: ANALYZE TABLE testtable COMPUTE STATISTICS FOR COLUMNS key
 PREHOOK: type: QUERY
 PREHOOK: Input: tblstatsdb1@testtable
 #### A masked pattern was here ####
+PREHOOK: Output: tblstatsdb1@testtable
 POSTHOOK: query: ANALYZE TABLE testtable COMPUTE STATISTICS FOR COLUMNS key
 POSTHOOK: type: QUERY
 POSTHOOK: Input: tblstatsdb1@testtable
 #### A masked pattern was here ####
+POSTHOOK: Output: tblstatsdb1@testtable
 PREHOOK: query: CREATE TABLE IF NOT EXISTS TestTable1 (key STRING, value STRING)
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:tblstatsdb1
@@ -54,10 +56,12 @@ PREHOOK: query: ANALYZE TABLE TestTable1 COMPUTE STATISTICS FOR COLUMNS key
 PREHOOK: type: QUERY
 PREHOOK: Input: tblstatsdb1@testtable1
 #### A masked pattern was here ####
+PREHOOK: Output: tblstatsdb1@testtable1
 POSTHOOK: query: ANALYZE TABLE TestTable1 COMPUTE STATISTICS FOR COLUMNS key
 POSTHOOK: type: QUERY
 POSTHOOK: Input: tblstatsdb1@testtable1
 #### A masked pattern was here ####
+POSTHOOK: Output: tblstatsdb1@testtable1
 PREHOOK: query: CREATE TABLE IF NOT EXISTS TESTTABLE2 (key STRING, value STRING)
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:tblstatsdb1
@@ -78,10 +82,12 @@ PREHOOK: query: ANALYZE TABLE TESTTABLE2 COMPUTE STATISTICS FOR COLUMNS key
 PREHOOK: type: QUERY
 PREHOOK: Input: tblstatsdb1@testtable2
 #### A masked pattern was here ####
+PREHOOK: Output: tblstatsdb1@testtable2
 POSTHOOK: query: ANALYZE TABLE TESTTABLE2 COMPUTE STATISTICS FOR COLUMNS key
 POSTHOOK: type: QUERY
 POSTHOOK: Input: tblstatsdb1@testtable2
 #### A masked pattern was here ####
+POSTHOOK: Output: tblstatsdb1@testtable2
 PREHOOK: query: DROP TABLE tblstatsdb1.testtable
 PREHOOK: type: DROPTABLE
 PREHOOK: Input: tblstatsdb1@testtable
@@ -146,10 +152,12 @@ PREHOOK: query: ANALYZE TABLE testtable COMPUTE STATISTICS FOR COLUMNS key
 PREHOOK: type: QUERY
 PREHOOK: Input: tblstatsdb2@testtable
 #### A masked pattern was here ####
+PREHOOK: Output: tblstatsdb2@testtable
 POSTHOOK: query: ANALYZE TABLE testtable COMPUTE STATISTICS FOR COLUMNS key
 POSTHOOK: type: QUERY
 POSTHOOK: Input: tblstatsdb2@testtable
 #### A masked pattern was here ####
+POSTHOOK: Output: tblstatsdb2@testtable
 PREHOOK: query: CREATE TABLE IF NOT EXISTS TestTable1 (key STRING, value STRING)
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: TBLSTATSDB2@TestTable1
@@ -170,10 +178,12 @@ PREHOOK: query: ANALYZE TABLE TestTable1 COMPUTE STATISTICS FOR COLUMNS key
 PREHOOK: type: QUERY
 PREHOOK: Input: tblstatsdb2@testtable1
 #### A masked pattern was here ####
+PREHOOK: Output: tblstatsdb2@testtable1
 POSTHOOK: query: ANALYZE TABLE TestTable1 COMPUTE STATISTICS FOR COLUMNS key
 POSTHOOK: type: QUERY
 POSTHOOK: Input: tblstatsdb2@testtable1
 #### A masked pattern was here ####
+POSTHOOK: Output: tblstatsdb2@testtable1
 PREHOOK: query: CREATE TABLE IF NOT EXISTS TESTTABLE2 (key STRING, value STRING)
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: TBLSTATSDB2@TESTTABLE2
@@ -194,10 +204,12 @@ PREHOOK: query: ANALYZE TABLE TESTTABLE2 COMPUTE STATISTICS FOR COLUMNS key
 PREHOOK: type: QUERY
 PREHOOK: Input: tblstatsdb2@testtable2
 #### A masked pattern was here ####
+PREHOOK: Output: tblstatsdb2@testtable2
 POSTHOOK: query: ANALYZE TABLE TESTTABLE2 COMPUTE STATISTICS FOR COLUMNS key
 POSTHOOK: type: QUERY
 POSTHOOK: Input: tblstatsdb2@testtable2
 #### A masked pattern was here ####
+POSTHOOK: Output: tblstatsdb2@testtable2
 PREHOOK: query: DROP TABLE TBLSTATSDB2.testtable
 PREHOOK: type: DROPTABLE
 PREHOOK: Input: tblstatsdb2@testtable

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/dynpart_sort_optimization_acid2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/dynpart_sort_optimization_acid2.q.out b/ql/src/test/results/clientpositive/dynpart_sort_optimization_acid2.q.out
index 76d0b7b..8199235 100644
--- a/ql/src/test/results/clientpositive/dynpart_sort_optimization_acid2.q.out
+++ b/ql/src/test/results/clientpositive/dynpart_sort_optimization_acid2.q.out
@@ -69,5 +69,6 @@ STAGE PLANS:
               name: default.non_acid
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 

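The dynpart_sort_optimization_acid2.q.out hunk is the first of many mechanical renames in this diff: wherever EXPLAIN used to print a "Stats-Aggr Operator" stage for a write, it now prints "Stats Work" with a "Basic Stats Work:" child. A sketch under assumed names (t_out is hypothetical, taken to have the same (key, value) schema as src):

  -- EXPLAIN of any statement that writes a table should now end with the
  -- renamed stage rather than "Stats-Aggr Operator":
  EXPLAIN INSERT OVERWRITE TABLE t_out SELECT key, value FROM src;
  --   Stage: Stage-2
  --     Stats Work
  --       Basic Stats Work:
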
http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/encrypted/encryption_join_unencrypted_tbl.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/encrypted/encryption_join_unencrypted_tbl.q.out b/ql/src/test/results/clientpositive/encrypted/encryption_join_unencrypted_tbl.q.out
index 9fefca1..f1654d4 100644
--- a/ql/src/test/results/clientpositive/encrypted/encryption_join_unencrypted_tbl.q.out
+++ b/ql/src/test/results/clientpositive/encrypted/encryption_join_unencrypted_tbl.q.out
@@ -546,22 +546,22 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: t1
-            Statistics: Num rows: 1 Data size: 5812 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             GatherStats: false
             Filter Operator
               isSamplingPred: false
               predicate: key is not null (type: boolean)
-              Statistics: Num rows: 1 Data size: 5812 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: string), value (type: string)
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 1 Data size: 5812 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
                   key expressions: UDFToDouble(_col0) (type: double)
                   null sort order: a
                   sort order: +
                   Map-reduce partition columns: UDFToDouble(_col0) (type: double)
-                  Statistics: Num rows: 1 Data size: 5812 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   tag: 0
                   value expressions: _col0 (type: string), _col1 (type: string)
                   auto parallelism: false
@@ -641,6 +641,7 @@ STAGE PLANS:
             input format: org.apache.hadoop.mapred.TextInputFormat
             output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
             properties:
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               column.name.delimiter ,
               columns key,value
@@ -649,8 +650,8 @@ STAGE PLANS:
 #### A masked pattern was here ####
               name default.src
               numFiles 1
-              numRows 0
-              rawDataSize 0
+              numRows 500
+              rawDataSize 5312
               serialization.ddl struct src { string key, string value}
               serialization.format 1
               serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -661,6 +662,7 @@ STAGE PLANS:
               input format: org.apache.hadoop.mapred.TextInputFormat
               output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
               properties:
+                COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                 bucket_count -1
                 column.name.delimiter ,
                 columns key,value
@@ -669,8 +671,8 @@ STAGE PLANS:
 #### A masked pattern was here ####
                 name default.src
                 numFiles 1
-                numRows 0
-                rawDataSize 0
+                numRows 500
+                rawDataSize 5312
                 serialization.ddl struct src { string key, string value}
                 serialization.format 1
                 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe

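The encryption_join_unencrypted_tbl.q.out hunks show the knock-on effect on planning: because the column-stats run over src now also persists basic stats, the table properties carry numRows 500 and rawDataSize 5312 instead of zeros, and the map-side Statistics estimates switch from the file-size placeholder (Num rows: 1 Data size: 5812) to the real 500 rows / 5312 bytes. A quick way to confirm the persisted side of this, assuming the same default.src fixture:

  -- Table-level properties now reflect the analyzed data:
  DESCRIBE FORMATTED src;
  --   numFiles     1
  --   numRows      500
  --   rawDataSize  5312
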
http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/encrypted/encryption_move_tbl.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/encrypted/encryption_move_tbl.q.out b/ql/src/test/results/clientpositive/encrypted/encryption_move_tbl.q.out
index ebe5085..4eee575 100644
--- a/ql/src/test/results/clientpositive/encrypted/encryption_move_tbl.q.out
+++ b/ql/src/test/results/clientpositive/encrypted/encryption_move_tbl.q.out
@@ -48,10 +48,12 @@ src
 PREHOOK: query: ANALYZE TABLE encrypted_table COMPUTE STATISTICS FOR COLUMNS
 PREHOOK: type: QUERY
 PREHOOK: Input: default@encrypted_table
+PREHOOK: Output: default@encrypted_table
 #### A PARTIAL masked pattern was here #### data/warehouse/encrypted_table/.hive-staging
 POSTHOOK: query: ANALYZE TABLE encrypted_table COMPUTE STATISTICS FOR COLUMNS
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@encrypted_table
+POSTHOOK: Output: default@encrypted_table
 #### A PARTIAL masked pattern was here #### data/warehouse/encrypted_table/.hive-staging
 PREHOOK: query: DESCRIBE FORMATTED encrypted_table key
 PREHOOK: type: DESCTABLE

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/exec_parallel_column_stats.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/exec_parallel_column_stats.q.out b/ql/src/test/results/clientpositive/exec_parallel_column_stats.q.out
index 5fd06af..90d42fb 100644
--- a/ql/src/test/results/clientpositive/exec_parallel_column_stats.q.out
+++ b/ql/src/test/results/clientpositive/exec_parallel_column_stats.q.out
@@ -1,6 +1,18 @@
-PREHOOK: query: explain analyze table src compute statistics for columns
+PREHOOK: query: create table t as select * from src
+PREHOOK: type: CREATETABLE_AS_SELECT
+PREHOOK: Input: default@src
+PREHOOK: Output: database:default
+PREHOOK: Output: default@t
+POSTHOOK: query: create table t as select * from src
+POSTHOOK: type: CREATETABLE_AS_SELECT
+POSTHOOK: Input: default@src
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@t
+POSTHOOK: Lineage: t.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: t.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: explain analyze table t compute statistics for columns
 PREHOOK: type: QUERY
-POSTHOOK: query: explain analyze table src compute statistics for columns
+POSTHOOK: query: explain analyze table t compute statistics for columns
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-0 is a root stage
@@ -11,7 +23,7 @@ STAGE PLANS:
     Map Reduce
       Map Operator Tree:
           TableScan
-            alias: src
+            alias: t
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: key (type: string), value (type: string)
@@ -41,17 +53,20 @@ STAGE PLANS:
                 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
 
   Stage: Stage-1
-    Column Stats Work
+    Stats Work
+      Basic Stats Work:
       Column Stats Desc:
           Columns: key, value
           Column Types: string, string
-          Table: default.src
+          Table: default.t
 
-PREHOOK: query: analyze table src compute statistics for columns
+PREHOOK: query: analyze table t compute statistics for columns
 PREHOOK: type: QUERY
-PREHOOK: Input: default@src
+PREHOOK: Input: default@t
+PREHOOK: Output: default@t
 #### A masked pattern was here ####
-POSTHOOK: query: analyze table src compute statistics for columns
+POSTHOOK: query: analyze table t compute statistics for columns
 POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src
+POSTHOOK: Input: default@t
+POSTHOOK: Output: default@t
 #### A masked pattern was here ####

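exec_parallel_column_stats.q.out is one of the few tests whose query changed rather than just its golden output: it now creates its own copy of src and analyzes that. Presumably this is because the analyzed table has become a write entity, and taking a write lock on the shared src fixture would interfere with other tests. The new shape, verbatim from the hunk above:

  CREATE TABLE t AS SELECT * FROM src;
  ANALYZE TABLE t COMPUTE STATISTICS FOR COLUMNS;
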
http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/explain_ddl.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/explain_ddl.q.out b/ql/src/test/results/clientpositive/explain_ddl.q.out
index e108e22..f0e54c5 100644
--- a/ql/src/test/results/clientpositive/explain_ddl.q.out
+++ b/ql/src/test/results/clientpositive/explain_ddl.q.out
@@ -102,7 +102,8 @@ STAGE PLANS:
           name: default.M1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-3
     Map Reduce
@@ -194,7 +195,8 @@ STAGE PLANS:
           name: default.M1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-3
     Map Reduce
@@ -288,7 +290,8 @@ STAGE PLANS:
           name: default.M1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-3
     Map Reduce
@@ -380,7 +383,8 @@ STAGE PLANS:
           name: default.V1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-3
     Map Reduce
@@ -540,7 +544,8 @@ STAGE PLANS:
               name: default.m1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-3
     Map Reduce

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/extrapolate_part_stats_date.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/extrapolate_part_stats_date.q.out b/ql/src/test/results/clientpositive/extrapolate_part_stats_date.q.out
index b12d3a1..5f42737 100644
--- a/ql/src/test/results/clientpositive/extrapolate_part_stats_date.q.out
+++ b/ql/src/test/results/clientpositive/extrapolate_part_stats_date.q.out
@@ -33,6 +33,10 @@ PREHOOK: Input: default@date_dim
 PREHOOK: Input: default@date_dim@d_date_sk=2416945
 PREHOOK: Input: default@date_dim@d_date_sk=2416946
 PREHOOK: Input: default@date_dim@d_date_sk=2416947
+PREHOOK: Output: default@date_dim
+PREHOOK: Output: default@date_dim@d_date_sk=2416945
+PREHOOK: Output: default@date_dim@d_date_sk=2416946
+PREHOOK: Output: default@date_dim@d_date_sk=2416947
 #### A masked pattern was here ####
 POSTHOOK: query: analyze table date_dim partition(d_date_sk) compute statistics for columns
 POSTHOOK: type: QUERY
@@ -40,6 +44,10 @@ POSTHOOK: Input: default@date_dim
 POSTHOOK: Input: default@date_dim@d_date_sk=2416945
 POSTHOOK: Input: default@date_dim@d_date_sk=2416946
 POSTHOOK: Input: default@date_dim@d_date_sk=2416947
+POSTHOOK: Output: default@date_dim
+POSTHOOK: Output: default@date_dim@d_date_sk=2416945
+POSTHOOK: Output: default@date_dim@d_date_sk=2416946
+POSTHOOK: Output: default@date_dim@d_date_sk=2416947
 #### A masked pattern was here ####
 PREHOOK: query: explain select count(*) from date_dim where d_date > date "1900-01-02" and d_date_sk= 2416945
 PREHOOK: type: QUERY

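extrapolate_part_stats_date.q.out shows the partitioned variant of the new write-entity behavior: with an open partition spec, ANALYZE registers the parent table and every matched partition as outputs. A sketch reusing the date_dim table from the hunk above:

  -- An unvalued partition spec analyzes, and now write-locks, all partitions:
  ANALYZE TABLE date_dim PARTITION (d_date_sk) COMPUTE STATISTICS FOR COLUMNS;
  --   PREHOOK: Output: default@date_dim
  --   PREHOOK: Output: default@date_dim@d_date_sk=2416945
  --   ... (one line per matched partition)

The fully specified specs in the extrapolate_part_stats_full and extrapolate_part_stats_partial hunks that follow behave the same way, adding one table-level and one partition-level Output line per command.
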
http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/extrapolate_part_stats_full.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/extrapolate_part_stats_full.q.out b/ql/src/test/results/clientpositive/extrapolate_part_stats_full.q.out
index 8933a7d..25f5372 100644
--- a/ql/src/test/results/clientpositive/extrapolate_part_stats_full.q.out
+++ b/ql/src/test/results/clientpositive/extrapolate_part_stats_full.q.out
@@ -59,21 +59,29 @@ PREHOOK: query: analyze table loc_orc_1d partition(year='2000') compute statisti
 PREHOOK: type: QUERY
 PREHOOK: Input: default@loc_orc_1d
 PREHOOK: Input: default@loc_orc_1d@year=2000
+PREHOOK: Output: default@loc_orc_1d
+PREHOOK: Output: default@loc_orc_1d@year=2000
 #### A masked pattern was here ####
 POSTHOOK: query: analyze table loc_orc_1d partition(year='2000') compute statistics for columns state,locid
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@loc_orc_1d
 POSTHOOK: Input: default@loc_orc_1d@year=2000
+POSTHOOK: Output: default@loc_orc_1d
+POSTHOOK: Output: default@loc_orc_1d@year=2000
 #### A masked pattern was here ####
 PREHOOK: query: analyze table loc_orc_1d partition(year='2001') compute statistics for columns state,locid
 PREHOOK: type: QUERY
 PREHOOK: Input: default@loc_orc_1d
 PREHOOK: Input: default@loc_orc_1d@year=2001
+PREHOOK: Output: default@loc_orc_1d
+PREHOOK: Output: default@loc_orc_1d@year=2001
 #### A masked pattern was here ####
 POSTHOOK: query: analyze table loc_orc_1d partition(year='2001') compute statistics for columns state,locid
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@loc_orc_1d
 POSTHOOK: Input: default@loc_orc_1d@year=2001
+POSTHOOK: Output: default@loc_orc_1d
+POSTHOOK: Output: default@loc_orc_1d@year=2001
 #### A masked pattern was here ####
 PREHOOK: query: describe formatted loc_orc_1d PARTITION(year='2001') state
 PREHOOK: type: DESCTABLE
@@ -356,41 +364,57 @@ PREHOOK: query: analyze table loc_orc_2d partition(zip=94086, year='2000') compu
 PREHOOK: type: QUERY
 PREHOOK: Input: default@loc_orc_2d
 PREHOOK: Input: default@loc_orc_2d@zip=94086/year=2000
+PREHOOK: Output: default@loc_orc_2d
+PREHOOK: Output: default@loc_orc_2d@zip=94086/year=2000
 #### A masked pattern was here ####
 POSTHOOK: query: analyze table loc_orc_2d partition(zip=94086, year='2000') compute statistics for columns state,locid
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@loc_orc_2d
 POSTHOOK: Input: default@loc_orc_2d@zip=94086/year=2000
+POSTHOOK: Output: default@loc_orc_2d
+POSTHOOK: Output: default@loc_orc_2d@zip=94086/year=2000
 #### A masked pattern was here ####
 PREHOOK: query: analyze table loc_orc_2d partition(zip=94087, year='2000') compute statistics for columns state,locid
 PREHOOK: type: QUERY
 PREHOOK: Input: default@loc_orc_2d
 PREHOOK: Input: default@loc_orc_2d@zip=94087/year=2000
+PREHOOK: Output: default@loc_orc_2d
+PREHOOK: Output: default@loc_orc_2d@zip=94087/year=2000
 #### A masked pattern was here ####
 POSTHOOK: query: analyze table loc_orc_2d partition(zip=94087, year='2000') compute statistics for columns state,locid
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@loc_orc_2d
 POSTHOOK: Input: default@loc_orc_2d@zip=94087/year=2000
+POSTHOOK: Output: default@loc_orc_2d
+POSTHOOK: Output: default@loc_orc_2d@zip=94087/year=2000
 #### A masked pattern was here ####
 PREHOOK: query: analyze table loc_orc_2d partition(zip=94086, year='2001') compute statistics for columns state,locid
 PREHOOK: type: QUERY
 PREHOOK: Input: default@loc_orc_2d
 PREHOOK: Input: default@loc_orc_2d@zip=94086/year=2001
+PREHOOK: Output: default@loc_orc_2d
+PREHOOK: Output: default@loc_orc_2d@zip=94086/year=2001
 #### A masked pattern was here ####
 POSTHOOK: query: analyze table loc_orc_2d partition(zip=94086, year='2001') compute statistics for columns state,locid
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@loc_orc_2d
 POSTHOOK: Input: default@loc_orc_2d@zip=94086/year=2001
+POSTHOOK: Output: default@loc_orc_2d
+POSTHOOK: Output: default@loc_orc_2d@zip=94086/year=2001
 #### A masked pattern was here ####
 PREHOOK: query: analyze table loc_orc_2d partition(zip=94087, year='2001') compute statistics for columns state,locid
 PREHOOK: type: QUERY
 PREHOOK: Input: default@loc_orc_2d
 PREHOOK: Input: default@loc_orc_2d@zip=94087/year=2001
+PREHOOK: Output: default@loc_orc_2d
+PREHOOK: Output: default@loc_orc_2d@zip=94087/year=2001
 #### A masked pattern was here ####
 POSTHOOK: query: analyze table loc_orc_2d partition(zip=94087, year='2001') compute statistics for columns state,locid
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@loc_orc_2d
 POSTHOOK: Input: default@loc_orc_2d@zip=94087/year=2001
+POSTHOOK: Output: default@loc_orc_2d
+POSTHOOK: Output: default@loc_orc_2d@zip=94087/year=2001
 #### A masked pattern was here ####
 PREHOOK: query: explain extended select state from loc_orc_2d
 PREHOOK: type: QUERY

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/extrapolate_part_stats_partial.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/extrapolate_part_stats_partial.q.out b/ql/src/test/results/clientpositive/extrapolate_part_stats_partial.q.out
index 8427b2d..5f74166 100644
--- a/ql/src/test/results/clientpositive/extrapolate_part_stats_partial.q.out
+++ b/ql/src/test/results/clientpositive/extrapolate_part_stats_partial.q.out
@@ -67,21 +67,29 @@ PREHOOK: query: analyze table loc_orc_1d partition(year='2001') compute statisti
 PREHOOK: type: QUERY
 PREHOOK: Input: default@loc_orc_1d
 PREHOOK: Input: default@loc_orc_1d@year=2001
+PREHOOK: Output: default@loc_orc_1d
+PREHOOK: Output: default@loc_orc_1d@year=2001
 #### A masked pattern was here ####
 POSTHOOK: query: analyze table loc_orc_1d partition(year='2001') compute statistics for columns state,locid
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@loc_orc_1d
 POSTHOOK: Input: default@loc_orc_1d@year=2001
+POSTHOOK: Output: default@loc_orc_1d
+POSTHOOK: Output: default@loc_orc_1d@year=2001
 #### A masked pattern was here ####
 PREHOOK: query: analyze table loc_orc_1d partition(year='2002') compute statistics for columns state,locid
 PREHOOK: type: QUERY
 PREHOOK: Input: default@loc_orc_1d
 PREHOOK: Input: default@loc_orc_1d@year=2002
+PREHOOK: Output: default@loc_orc_1d
+PREHOOK: Output: default@loc_orc_1d@year=2002
 #### A masked pattern was here ####
 POSTHOOK: query: analyze table loc_orc_1d partition(year='2002') compute statistics for columns state,locid
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@loc_orc_1d
 POSTHOOK: Input: default@loc_orc_1d@year=2002
+POSTHOOK: Output: default@loc_orc_1d
+POSTHOOK: Output: default@loc_orc_1d@year=2002
 #### A masked pattern was here ####
 PREHOOK: query: describe formatted loc_orc_1d PARTITION(year='2001') state
 PREHOOK: type: DESCTABLE
@@ -529,21 +537,29 @@ PREHOOK: query: analyze table loc_orc_1d partition(year='2000') compute statisti
 PREHOOK: type: QUERY
 PREHOOK: Input: default@loc_orc_1d
 PREHOOK: Input: default@loc_orc_1d@year=2000
+PREHOOK: Output: default@loc_orc_1d
+PREHOOK: Output: default@loc_orc_1d@year=2000
 #### A masked pattern was here ####
 POSTHOOK: query: analyze table loc_orc_1d partition(year='2000') compute statistics for columns state
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@loc_orc_1d
 POSTHOOK: Input: default@loc_orc_1d@year=2000
+POSTHOOK: Output: default@loc_orc_1d
+POSTHOOK: Output: default@loc_orc_1d@year=2000
 #### A masked pattern was here ####
 PREHOOK: query: analyze table loc_orc_1d partition(year='2003') compute statistics for columns state
 PREHOOK: type: QUERY
 PREHOOK: Input: default@loc_orc_1d
 PREHOOK: Input: default@loc_orc_1d@year=2003
+PREHOOK: Output: default@loc_orc_1d
+PREHOOK: Output: default@loc_orc_1d@year=2003
 #### A masked pattern was here ####
 POSTHOOK: query: analyze table loc_orc_1d partition(year='2003') compute statistics for columns state
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@loc_orc_1d
 POSTHOOK: Input: default@loc_orc_1d@year=2003
+POSTHOOK: Output: default@loc_orc_1d
+POSTHOOK: Output: default@loc_orc_1d@year=2003
 #### A masked pattern was here ####
 PREHOOK: query: explain extended select state from loc_orc_1d
 PREHOOK: type: QUERY
@@ -1009,21 +1025,29 @@ PREHOOK: query: analyze table loc_orc_2d partition(zip=94086, year='2001') compu
 PREHOOK: type: QUERY
 PREHOOK: Input: default@loc_orc_2d
 PREHOOK: Input: default@loc_orc_2d@zip=94086/year=2001
+PREHOOK: Output: default@loc_orc_2d
+PREHOOK: Output: default@loc_orc_2d@zip=94086/year=2001
 #### A masked pattern was here ####
 POSTHOOK: query: analyze table loc_orc_2d partition(zip=94086, year='2001') compute statistics for columns state,locid
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@loc_orc_2d
 POSTHOOK: Input: default@loc_orc_2d@zip=94086/year=2001
+POSTHOOK: Output: default@loc_orc_2d
+POSTHOOK: Output: default@loc_orc_2d@zip=94086/year=2001
 #### A masked pattern was here ####
 PREHOOK: query: analyze table loc_orc_2d partition(zip=94087, year='2002') compute statistics for columns state,locid
 PREHOOK: type: QUERY
 PREHOOK: Input: default@loc_orc_2d
 PREHOOK: Input: default@loc_orc_2d@zip=94087/year=2002
+PREHOOK: Output: default@loc_orc_2d
+PREHOOK: Output: default@loc_orc_2d@zip=94087/year=2002
 #### A masked pattern was here ####
 POSTHOOK: query: analyze table loc_orc_2d partition(zip=94087, year='2002') compute statistics for columns state,locid
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@loc_orc_2d
 POSTHOOK: Input: default@loc_orc_2d@zip=94087/year=2002
+POSTHOOK: Output: default@loc_orc_2d
+POSTHOOK: Output: default@loc_orc_2d@zip=94087/year=2002
 #### A masked pattern was here ####
 PREHOOK: query: explain extended select state from loc_orc_2d
 PREHOOK: type: QUERY

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/fm-sketch.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/fm-sketch.q.out b/ql/src/test/results/clientpositive/fm-sketch.q.out
index e519084..7691342 100644
--- a/ql/src/test/results/clientpositive/fm-sketch.q.out
+++ b/ql/src/test/results/clientpositive/fm-sketch.q.out
@@ -58,7 +58,8 @@ STAGE PLANS:
                 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
 
   Stage: Stage-1
-    Column Stats Work
+    Stats Work
+      Basic Stats Work:
       Column Stats Desc:
           Columns: key
           Column Types: int
@@ -67,10 +68,12 @@ STAGE PLANS:
 PREHOOK: query: analyze table n compute statistics for columns
 PREHOOK: type: QUERY
 PREHOOK: Input: default@n
+PREHOOK: Output: default@n
 #### A masked pattern was here ####
 POSTHOOK: query: analyze table n compute statistics for columns
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@n
+POSTHOOK: Output: default@n
 #### A masked pattern was here ####
 PREHOOK: query: desc formatted n key
 PREHOOK: type: DESCTABLE
@@ -151,7 +154,8 @@ STAGE PLANS:
                 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
 
   Stage: Stage-1
-    Column Stats Work
+    Stats Work
+      Basic Stats Work:
       Column Stats Desc:
           Columns: key
           Column Types: int
@@ -160,10 +164,12 @@ STAGE PLANS:
 PREHOOK: query: analyze table i compute statistics for columns
 PREHOOK: type: QUERY
 PREHOOK: Input: default@i
+PREHOOK: Output: default@i
 #### A masked pattern was here ####
 POSTHOOK: query: analyze table i compute statistics for columns
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@i
+POSTHOOK: Output: default@i
 #### A masked pattern was here ####
 PREHOOK: query: desc formatted i key
 PREHOOK: type: DESCTABLE
@@ -212,10 +218,12 @@ POSTHOOK: Lineage: i.key EXPRESSION [(src)src.FieldSchema(name:key, type:string,
 PREHOOK: query: analyze table i compute statistics for columns
 PREHOOK: type: QUERY
 PREHOOK: Input: default@i
+PREHOOK: Output: default@i
 #### A masked pattern was here ####
 POSTHOOK: query: analyze table i compute statistics for columns
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@i
+POSTHOOK: Output: default@i
 #### A masked pattern was here ####
 PREHOOK: query: desc formatted i key
 PREHOOK: type: DESCTABLE
@@ -264,10 +272,12 @@ POSTHOOK: Lineage: i.key EXPRESSION [(src)src.FieldSchema(name:key, type:string,
 PREHOOK: query: analyze table i compute statistics for columns
 PREHOOK: type: QUERY
 PREHOOK: Input: default@i
+PREHOOK: Output: default@i
 #### A masked pattern was here ####
 POSTHOOK: query: analyze table i compute statistics for columns
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@i
+POSTHOOK: Output: default@i
 #### A masked pattern was here ####
 PREHOOK: query: desc formatted i key
 PREHOOK: type: DESCTABLE
@@ -342,10 +352,12 @@ POSTHOOK: Lineage: i.key EXPRESSION [(values__tmp__table__5)values__tmp__table__
 PREHOOK: query: analyze table i compute statistics for columns
 PREHOOK: type: QUERY
 PREHOOK: Input: default@i
+PREHOOK: Output: default@i
 #### A masked pattern was here ####
 POSTHOOK: query: analyze table i compute statistics for columns
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@i
+POSTHOOK: Output: default@i
 #### A masked pattern was here ####
 PREHOOK: query: desc formatted i key
 PREHOOK: type: DESCTABLE

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/groupby1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/groupby1.q.out b/ql/src/test/results/clientpositive/groupby1.q.out
index 46e09dd..d05feec 100644
--- a/ql/src/test/results/clientpositive/groupby1.q.out
+++ b/ql/src/test/results/clientpositive/groupby1.q.out
@@ -90,7 +90,8 @@ STAGE PLANS:
               name: default.dest_g1
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM src INSERT OVERWRITE TABLE dest_g1 SELECT src.key, sum(substr(src.value,5)) GROUP BY src.key
 PREHOOK: type: QUERY

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/groupby10.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/groupby10.q.out b/ql/src/test/results/clientpositive/groupby10.q.out
index d48b7db..9f90a2c 100644
--- a/ql/src/test/results/clientpositive/groupby10.q.out
+++ b/ql/src/test/results/clientpositive/groupby10.q.out
@@ -131,7 +131,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-4
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-5
     Map Reduce
@@ -197,7 +198,8 @@ STAGE PLANS:
               name: default.dest2
 
   Stage: Stage-7
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM INPUT
 INSERT OVERWRITE TABLE dest1 SELECT INPUT.key, count(substr(INPUT.value,5)), count(distinct substr(INPUT.value,5)) GROUP BY INPUT.key
@@ -376,7 +378,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-4
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-5
     Map Reduce
@@ -442,7 +445,8 @@ STAGE PLANS:
               name: default.dest2
 
   Stage: Stage-7
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM INPUT
 INSERT OVERWRITE TABLE dest1 SELECT INPUT.key, count(substr(INPUT.value,5)), count(distinct substr(INPUT.value,5)) GROUP BY INPUT.key
@@ -604,7 +608,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-1
     Move Operator
@@ -617,7 +622,8 @@ STAGE PLANS:
               name: default.dest2
 
   Stage: Stage-4
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM INPUT
 INSERT OVERWRITE TABLE dest1 SELECT INPUT.key, sum(distinct substr(INPUT.value,5)), count(distinct substr(INPUT.value,5)) GROUP BY INPUT.key

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/groupby11.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/groupby11.q.out b/ql/src/test/results/clientpositive/groupby11.q.out
index 1d0e86a..2ab4c39 100644
--- a/ql/src/test/results/clientpositive/groupby11.q.out
+++ b/ql/src/test/results/clientpositive/groupby11.q.out
@@ -121,7 +121,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-4
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-5
     Map Reduce
@@ -189,7 +190,8 @@ STAGE PLANS:
               name: default.dest2
 
   Stage: Stage-7
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM src
 INSERT OVERWRITE TABLE dest1 partition(ds='111')

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/groupby12.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/groupby12.q.out b/ql/src/test/results/clientpositive/groupby12.q.out
index 921fc92..2f633f0 100644
--- a/ql/src/test/results/clientpositive/groupby12.q.out
+++ b/ql/src/test/results/clientpositive/groupby12.q.out
@@ -66,7 +66,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM src
 INSERT OVERWRITE TABLE dest1 SELECT COUNT(src.key), COUNT(DISTINCT value) GROUP BY src.key

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/groupby1_limit.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/groupby1_limit.q.out b/ql/src/test/results/clientpositive/groupby1_limit.q.out
index 78a49eb..e5fa6d1 100644
--- a/ql/src/test/results/clientpositive/groupby1_limit.q.out
+++ b/ql/src/test/results/clientpositive/groupby1_limit.q.out
@@ -100,7 +100,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM src INSERT OVERWRITE TABLE dest1 SELECT src.key, sum(substr(src.value,5)) GROUP BY src.key ORDER BY src.key LIMIT 5
 PREHOOK: type: QUERY

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/groupby1_map.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/groupby1_map.q.out b/ql/src/test/results/clientpositive/groupby1_map.q.out
index cc985a5..337c2e0 100644
--- a/ql/src/test/results/clientpositive/groupby1_map.q.out
+++ b/ql/src/test/results/clientpositive/groupby1_map.q.out
@@ -71,7 +71,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM src INSERT OVERWRITE TABLE dest1 SELECT src.key, sum(substr(src.value,5)) GROUP BY src.key
 PREHOOK: type: QUERY

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/groupby1_map_nomap.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/groupby1_map_nomap.q.out b/ql/src/test/results/clientpositive/groupby1_map_nomap.q.out
index cc985a5..337c2e0 100644
--- a/ql/src/test/results/clientpositive/groupby1_map_nomap.q.out
+++ b/ql/src/test/results/clientpositive/groupby1_map_nomap.q.out
@@ -71,7 +71,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM src INSERT OVERWRITE TABLE dest1 SELECT src.key, sum(substr(src.value,5)) GROUP BY src.key
 PREHOOK: type: QUERY

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/groupby1_map_skew.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/groupby1_map_skew.q.out b/ql/src/test/results/clientpositive/groupby1_map_skew.q.out
index 116744a..a140a02 100644
--- a/ql/src/test/results/clientpositive/groupby1_map_skew.q.out
+++ b/ql/src/test/results/clientpositive/groupby1_map_skew.q.out
@@ -96,7 +96,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM src INSERT OVERWRITE TABLE dest1 SELECT src.key, sum(substr(src.value,5)) GROUP BY src.key
 PREHOOK: type: QUERY

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/groupby1_noskew.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/groupby1_noskew.q.out b/ql/src/test/results/clientpositive/groupby1_noskew.q.out
index 98c0d3c..702ea3e 100644
--- a/ql/src/test/results/clientpositive/groupby1_noskew.q.out
+++ b/ql/src/test/results/clientpositive/groupby1_noskew.q.out
@@ -65,7 +65,8 @@ STAGE PLANS:
               name: default.dest_g1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM src INSERT OVERWRITE TABLE dest_g1 SELECT src.key, sum(substr(src.value,5)) GROUP BY src.key
 PREHOOK: type: QUERY

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/groupby2_map.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/groupby2_map.q.out b/ql/src/test/results/clientpositive/groupby2_map.q.out
index 0dcd810..427590d 100644
--- a/ql/src/test/results/clientpositive/groupby2_map.q.out
+++ b/ql/src/test/results/clientpositive/groupby2_map.q.out
@@ -73,7 +73,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM src
 INSERT OVERWRITE TABLE dest1 SELECT substr(src.key,1,1), count(DISTINCT substr(src.value,5)), concat(substr(src.key,1,1),sum(substr(src.value,5))) GROUP BY substr(src.key,1,1)

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/groupby2_map_multi_distinct.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/groupby2_map_multi_distinct.q.out b/ql/src/test/results/clientpositive/groupby2_map_multi_distinct.q.out
index 64477db..0ab1985 100644
--- a/ql/src/test/results/clientpositive/groupby2_map_multi_distinct.q.out
+++ b/ql/src/test/results/clientpositive/groupby2_map_multi_distinct.q.out
@@ -73,7 +73,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM src
 INSERT OVERWRITE TABLE dest1 SELECT substr(src.key,1,1), count(DISTINCT substr(src.value,5)), concat(substr(src.key,1,1),sum(substr(src.value,5))), sum(DISTINCT substr(src.value, 5)), count(src.value) GROUP BY substr(src.key,1,1)
@@ -175,7 +176,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM src
 INSERT OVERWRITE TABLE dest1 SELECT substr(src.key,1,1), count(DISTINCT substr(src.key,1,1)), concat(substr(src.key,1,1),sum(substr(src.value,5))), sum(DISTINCT substr(src.value, 5)), count(src.value) GROUP BY substr(src.key,1,1)

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/groupby2_map_skew.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/groupby2_map_skew.q.out b/ql/src/test/results/clientpositive/groupby2_map_skew.q.out
index 813ae5c..bb5a09f 100644
--- a/ql/src/test/results/clientpositive/groupby2_map_skew.q.out
+++ b/ql/src/test/results/clientpositive/groupby2_map_skew.q.out
@@ -98,7 +98,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM src
 INSERT OVERWRITE TABLE dest1 SELECT substr(src.key,1,1), count(DISTINCT substr(src.value,5)), concat(substr(src.key,1,1),sum(substr(src.value,5))) GROUP BY substr(src.key,1,1)

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/groupby2_noskew.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/groupby2_noskew.q.out b/ql/src/test/results/clientpositive/groupby2_noskew.q.out
index 5192db3..782a1a2 100644
--- a/ql/src/test/results/clientpositive/groupby2_noskew.q.out
+++ b/ql/src/test/results/clientpositive/groupby2_noskew.q.out
@@ -66,7 +66,8 @@ STAGE PLANS:
               name: default.dest_g2
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM src
 INSERT OVERWRITE TABLE dest_g2 SELECT substr(src.key,1,1), count(DISTINCT substr(src.value,5)), concat(substr(src.key,1,1),sum(substr(src.value,5))) GROUP BY substr(src.key,1,1)

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/groupby2_noskew_multi_distinct.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/groupby2_noskew_multi_distinct.q.out b/ql/src/test/results/clientpositive/groupby2_noskew_multi_distinct.q.out
index 1c24213..9a070ac 100644
--- a/ql/src/test/results/clientpositive/groupby2_noskew_multi_distinct.q.out
+++ b/ql/src/test/results/clientpositive/groupby2_noskew_multi_distinct.q.out
@@ -67,7 +67,8 @@ STAGE PLANS:
               name: default.dest_g2
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM src
 INSERT OVERWRITE TABLE dest_g2 SELECT substr(src.key,1,1), count(DISTINCT substr(src.value,5)), concat(substr(src.key,1,1),sum(substr(src.value,5))), sum(DISTINCT substr(src.value, 5)), count(src.value) GROUP BY substr(src.key,1,1)

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/groupby3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/groupby3.q.out b/ql/src/test/results/clientpositive/groupby3.q.out
index 2ebeae4..2f2b533 100644
--- a/ql/src/test/results/clientpositive/groupby3.q.out
+++ b/ql/src/test/results/clientpositive/groupby3.q.out
@@ -105,7 +105,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM src
 INSERT OVERWRITE TABLE dest1 SELECT 

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/groupby3_map.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/groupby3_map.q.out b/ql/src/test/results/clientpositive/groupby3_map.q.out
index 07c122e..1bbf23d 100644
--- a/ql/src/test/results/clientpositive/groupby3_map.q.out
+++ b/ql/src/test/results/clientpositive/groupby3_map.q.out
@@ -89,7 +89,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM src
 INSERT OVERWRITE TABLE dest1 SELECT

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/groupby3_map_multi_distinct.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/groupby3_map_multi_distinct.q.out b/ql/src/test/results/clientpositive/groupby3_map_multi_distinct.q.out
index a4501f7..248a317 100644
--- a/ql/src/test/results/clientpositive/groupby3_map_multi_distinct.q.out
+++ b/ql/src/test/results/clientpositive/groupby3_map_multi_distinct.q.out
@@ -93,7 +93,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM src
 INSERT OVERWRITE TABLE dest1 SELECT

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/groupby3_map_skew.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/groupby3_map_skew.q.out b/ql/src/test/results/clientpositive/groupby3_map_skew.q.out
index e02bdeb..8ba3a58 100644
--- a/ql/src/test/results/clientpositive/groupby3_map_skew.q.out
+++ b/ql/src/test/results/clientpositive/groupby3_map_skew.q.out
@@ -112,7 +112,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM src
 INSERT OVERWRITE TABLE dest1 SELECT

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/groupby3_noskew.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/groupby3_noskew.q.out b/ql/src/test/results/clientpositive/groupby3_noskew.q.out
index 624fd2d..5c520bb 100644
--- a/ql/src/test/results/clientpositive/groupby3_noskew.q.out
+++ b/ql/src/test/results/clientpositive/groupby3_noskew.q.out
@@ -82,7 +82,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM src
 INSERT OVERWRITE TABLE dest1 SELECT

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/groupby3_noskew_multi_distinct.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/groupby3_noskew_multi_distinct.q.out b/ql/src/test/results/clientpositive/groupby3_noskew_multi_distinct.q.out
index a1d403d..c5d121b 100644
--- a/ql/src/test/results/clientpositive/groupby3_noskew_multi_distinct.q.out
+++ b/ql/src/test/results/clientpositive/groupby3_noskew_multi_distinct.q.out
@@ -86,7 +86,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM src
 INSERT OVERWRITE TABLE dest1 SELECT

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/groupby4.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/groupby4.q.out b/ql/src/test/results/clientpositive/groupby4.q.out
index 3f77e47..e8e7b88 100644
--- a/ql/src/test/results/clientpositive/groupby4.q.out
+++ b/ql/src/test/results/clientpositive/groupby4.q.out
@@ -84,7 +84,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM src
 INSERT OVERWRITE TABLE dest1 SELECT substr(src.key,1,1) GROUP BY substr(src.key,1,1)

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/groupby4_map.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/groupby4_map.q.out b/ql/src/test/results/clientpositive/groupby4_map.q.out
index 97915e7..647242a 100644
--- a/ql/src/test/results/clientpositive/groupby4_map.q.out
+++ b/ql/src/test/results/clientpositive/groupby4_map.q.out
@@ -65,7 +65,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM src INSERT OVERWRITE TABLE dest1 SELECT count(1)
 PREHOOK: type: QUERY

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/groupby4_map_skew.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/groupby4_map_skew.q.out b/ql/src/test/results/clientpositive/groupby4_map_skew.q.out
index ae83f7a..0857f22 100644
--- a/ql/src/test/results/clientpositive/groupby4_map_skew.q.out
+++ b/ql/src/test/results/clientpositive/groupby4_map_skew.q.out
@@ -65,7 +65,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM src INSERT OVERWRITE TABLE dest1 SELECT count(1)
 PREHOOK: type: QUERY

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/groupby4_noskew.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/groupby4_noskew.q.out b/ql/src/test/results/clientpositive/groupby4_noskew.q.out
index c7db0d7..5827ef0 100644
--- a/ql/src/test/results/clientpositive/groupby4_noskew.q.out
+++ b/ql/src/test/results/clientpositive/groupby4_noskew.q.out
@@ -61,7 +61,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM src
 INSERT OVERWRITE TABLE dest1 SELECT substr(src.key,1,1) GROUP BY substr(src.key,1,1)

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/groupby5.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/groupby5.q.out b/ql/src/test/results/clientpositive/groupby5.q.out
index 9bf01ee..995d30e 100644
--- a/ql/src/test/results/clientpositive/groupby5.q.out
+++ b/ql/src/test/results/clientpositive/groupby5.q.out
@@ -96,7 +96,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: INSERT OVERWRITE TABLE dest1 
 SELECT src.key, sum(substr(src.value,5)) 

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/groupby5_map.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/groupby5_map.q.out b/ql/src/test/results/clientpositive/groupby5_map.q.out
index 5fbd3d7..393c4e2 100644
--- a/ql/src/test/results/clientpositive/groupby5_map.q.out
+++ b/ql/src/test/results/clientpositive/groupby5_map.q.out
@@ -67,7 +67,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM src INSERT OVERWRITE TABLE dest1 SELECT sum(src.key)
 PREHOOK: type: QUERY

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/groupby5_map_skew.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/groupby5_map_skew.q.out b/ql/src/test/results/clientpositive/groupby5_map_skew.q.out
index 60b010b..67767f5 100644
--- a/ql/src/test/results/clientpositive/groupby5_map_skew.q.out
+++ b/ql/src/test/results/clientpositive/groupby5_map_skew.q.out
@@ -67,7 +67,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM src INSERT OVERWRITE TABLE dest1 SELECT sum(src.key)
 PREHOOK: type: QUERY

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/groupby5_noskew.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/groupby5_noskew.q.out b/ql/src/test/results/clientpositive/groupby5_noskew.q.out
index 612a0f6..dfecdb5 100644
--- a/ql/src/test/results/clientpositive/groupby5_noskew.q.out
+++ b/ql/src/test/results/clientpositive/groupby5_noskew.q.out
@@ -71,7 +71,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: INSERT OVERWRITE TABLE dest1 
 SELECT src.key, sum(substr(src.value,5)) 

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/groupby6.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/groupby6.q.out b/ql/src/test/results/clientpositive/groupby6.q.out
index b790224..afc23ec 100644
--- a/ql/src/test/results/clientpositive/groupby6.q.out
+++ b/ql/src/test/results/clientpositive/groupby6.q.out
@@ -84,7 +84,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM src
 INSERT OVERWRITE TABLE dest1 SELECT DISTINCT substr(src.value,5,1)

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/groupby6_map.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/groupby6_map.q.out b/ql/src/test/results/clientpositive/groupby6_map.q.out
index 4ba3772..c0c8841 100644
--- a/ql/src/test/results/clientpositive/groupby6_map.q.out
+++ b/ql/src/test/results/clientpositive/groupby6_map.q.out
@@ -66,7 +66,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM src
 INSERT OVERWRITE TABLE dest1 SELECT DISTINCT substr(src.value,5,1)

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/groupby6_map_skew.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/groupby6_map_skew.q.out b/ql/src/test/results/clientpositive/groupby6_map_skew.q.out
index 5141c0d..5bb8695 100644
--- a/ql/src/test/results/clientpositive/groupby6_map_skew.q.out
+++ b/ql/src/test/results/clientpositive/groupby6_map_skew.q.out
@@ -89,7 +89,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM src
 INSERT OVERWRITE TABLE dest1 SELECT DISTINCT substr(src.value,5,1)

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/groupby6_noskew.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/groupby6_noskew.q.out b/ql/src/test/results/clientpositive/groupby6_noskew.q.out
index fd796c7..0cafbd1 100644
--- a/ql/src/test/results/clientpositive/groupby6_noskew.q.out
+++ b/ql/src/test/results/clientpositive/groupby6_noskew.q.out
@@ -61,7 +61,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM src
 INSERT OVERWRITE TABLE dest1 SELECT DISTINCT substr(src.value,5,1)

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/groupby7_map.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/groupby7_map.q.out b/ql/src/test/results/clientpositive/groupby7_map.q.out
index 0ef29cd..b7b2c8c 100644
--- a/ql/src/test/results/clientpositive/groupby7_map.q.out
+++ b/ql/src/test/results/clientpositive/groupby7_map.q.out
@@ -102,7 +102,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-4
     Map Reduce
@@ -145,7 +146,8 @@ STAGE PLANS:
               name: default.dest2
 
   Stage: Stage-5
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM SRC
 INSERT OVERWRITE TABLE DEST1 SELECT SRC.key, sum(SUBSTR(SRC.value,5)) GROUP BY SRC.key

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/groupby7_map_multi_single_reducer.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/groupby7_map_multi_single_reducer.q.out b/ql/src/test/results/clientpositive/groupby7_map_multi_single_reducer.q.out
index 7c3b033..6f88027 100644
--- a/ql/src/test/results/clientpositive/groupby7_map_multi_single_reducer.q.out
+++ b/ql/src/test/results/clientpositive/groupby7_map_multi_single_reducer.q.out
@@ -99,7 +99,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-1
     Move Operator
@@ -112,7 +113,8 @@ STAGE PLANS:
               name: default.dest2
 
   Stage: Stage-4
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM SRC
 INSERT OVERWRITE TABLE DEST1 SELECT SRC.key, sum(SUBSTR(SRC.value,5)) GROUP BY SRC.key

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/groupby7_map_skew.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/groupby7_map_skew.q.out b/ql/src/test/results/clientpositive/groupby7_map_skew.q.out
index 4bfa52e..17eb283 100644
--- a/ql/src/test/results/clientpositive/groupby7_map_skew.q.out
+++ b/ql/src/test/results/clientpositive/groupby7_map_skew.q.out
@@ -128,7 +128,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-4
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-5
     Map Reduce
@@ -195,7 +196,8 @@ STAGE PLANS:
               name: default.dest2
 
   Stage: Stage-7
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM SRC
 INSERT OVERWRITE TABLE DEST1 SELECT SRC.key, sum(SUBSTR(SRC.value,5)) GROUP BY SRC.key

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/groupby7_noskew.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/groupby7_noskew.q.out b/ql/src/test/results/clientpositive/groupby7_noskew.q.out
index 6178f58..2303b33 100644
--- a/ql/src/test/results/clientpositive/groupby7_noskew.q.out
+++ b/ql/src/test/results/clientpositive/groupby7_noskew.q.out
@@ -90,7 +90,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-4
     Map Reduce
@@ -133,7 +134,8 @@ STAGE PLANS:
               name: default.dest2
 
   Stage: Stage-5
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM SRC
 INSERT OVERWRITE TABLE DEST1 SELECT SRC.key, sum(SUBSTR(SRC.value,5)) GROUP BY SRC.key

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/groupby7_noskew_multi_single_reducer.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/groupby7_noskew_multi_single_reducer.q.out b/ql/src/test/results/clientpositive/groupby7_noskew_multi_single_reducer.q.out
index f38c428..ff29aec 100644
--- a/ql/src/test/results/clientpositive/groupby7_noskew_multi_single_reducer.q.out
+++ b/ql/src/test/results/clientpositive/groupby7_noskew_multi_single_reducer.q.out
@@ -120,7 +120,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-4
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-5
     Map Reduce
@@ -164,7 +165,8 @@ STAGE PLANS:
               name: default.dest2
 
   Stage: Stage-6
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM SRC
 INSERT OVERWRITE TABLE DEST1 SELECT SRC.key, sum(SUBSTR(SRC.value,5)) GROUP BY SRC.key ORDER BY SRC.key limit 10

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/groupby8.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/groupby8.q.out b/ql/src/test/results/clientpositive/groupby8.q.out
index 1856a92..826064c 100644
--- a/ql/src/test/results/clientpositive/groupby8.q.out
+++ b/ql/src/test/results/clientpositive/groupby8.q.out
@@ -115,7 +115,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-4
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-5
     Map Reduce
@@ -181,7 +182,8 @@ STAGE PLANS:
               name: default.dest2
 
   Stage: Stage-7
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM SRC
 INSERT OVERWRITE TABLE DEST1 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key
@@ -936,7 +938,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-4
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-5
     Map Reduce
@@ -1002,7 +1005,8 @@ STAGE PLANS:
               name: default.dest2
 
   Stage: Stage-7
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM SRC
 INSERT OVERWRITE TABLE DEST1 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/groupby8_map.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/groupby8_map.q.out b/ql/src/test/results/clientpositive/groupby8_map.q.out
index f683a8b..abe813c 100644
--- a/ql/src/test/results/clientpositive/groupby8_map.q.out
+++ b/ql/src/test/results/clientpositive/groupby8_map.q.out
@@ -98,7 +98,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-1
     Move Operator
@@ -111,7 +112,8 @@ STAGE PLANS:
               name: default.dest2
 
   Stage: Stage-4
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM SRC
 INSERT OVERWRITE TABLE DEST1 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/groupby8_map_skew.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/groupby8_map_skew.q.out b/ql/src/test/results/clientpositive/groupby8_map_skew.q.out
index 5e60d3e..a4de8b4 100644
--- a/ql/src/test/results/clientpositive/groupby8_map_skew.q.out
+++ b/ql/src/test/results/clientpositive/groupby8_map_skew.q.out
@@ -127,7 +127,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-4
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-5
     Map Reduce
@@ -193,7 +194,8 @@ STAGE PLANS:
               name: default.dest2
 
   Stage: Stage-7
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM SRC
 INSERT OVERWRITE TABLE DEST1 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/groupby8_noskew.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/groupby8_noskew.q.out b/ql/src/test/results/clientpositive/groupby8_noskew.q.out
index f683a8b..abe813c 100644
--- a/ql/src/test/results/clientpositive/groupby8_noskew.q.out
+++ b/ql/src/test/results/clientpositive/groupby8_noskew.q.out
@@ -98,7 +98,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-1
     Move Operator
@@ -111,7 +112,8 @@ STAGE PLANS:
               name: default.dest2
 
   Stage: Stage-4
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM SRC
 INSERT OVERWRITE TABLE DEST1 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/groupby9.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/groupby9.q.out b/ql/src/test/results/clientpositive/groupby9.q.out
index 15ea185..28032e3 100644
--- a/ql/src/test/results/clientpositive/groupby9.q.out
+++ b/ql/src/test/results/clientpositive/groupby9.q.out
@@ -101,7 +101,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-4
     Map Reduce
@@ -143,7 +144,8 @@ STAGE PLANS:
               name: default.dest2
 
   Stage: Stage-5
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM SRC
 INSERT OVERWRITE TABLE DEST1 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key
@@ -885,7 +887,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-4
     Map Reduce
@@ -927,7 +930,8 @@ STAGE PLANS:
               name: default.dest2
 
   Stage: Stage-5
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM SRC
 INSERT OVERWRITE TABLE DEST1 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key
@@ -1669,7 +1673,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-4
     Map Reduce
@@ -1711,7 +1716,8 @@ STAGE PLANS:
               name: default.dest2
 
   Stage: Stage-5
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM SRC
 INSERT OVERWRITE TABLE DEST1 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key
@@ -2454,7 +2460,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-4
     Map Reduce
@@ -2497,7 +2504,8 @@ STAGE PLANS:
               name: default.dest2
 
   Stage: Stage-5
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM SRC
 INSERT OVERWRITE TABLE DEST1 SELECT SRC.key, COUNT(SUBSTR(SRC.value,5)) GROUP BY SRC.key
@@ -3239,7 +3247,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-4
     Map Reduce
@@ -3281,7 +3290,8 @@ STAGE PLANS:
               name: default.dest2
 
   Stage: Stage-5
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM SRC
 INSERT OVERWRITE TABLE DEST1 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/groupby_complex_types.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/groupby_complex_types.q.out b/ql/src/test/results/clientpositive/groupby_complex_types.q.out
index b16a4ad..107eea3 100644
--- a/ql/src/test/results/clientpositive/groupby_complex_types.q.out
+++ b/ql/src/test/results/clientpositive/groupby_complex_types.q.out
@@ -127,7 +127,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-4
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-5
     Map Reduce
@@ -166,7 +167,8 @@ STAGE PLANS:
               name: default.dest2
 
   Stage: Stage-6
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-7
     Map Reduce
@@ -205,7 +207,8 @@ STAGE PLANS:
               name: default.dest3
 
   Stage: Stage-8
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM SRC
 INSERT OVERWRITE TABLE DEST1 SELECT ARRAY(SRC.key), COUNT(1) GROUP BY ARRAY(SRC.key)

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/groupby_complex_types_multi_single_reducer.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/groupby_complex_types_multi_single_reducer.q.out b/ql/src/test/results/clientpositive/groupby_complex_types_multi_single_reducer.q.out
index caa5395..bfddf74 100644
--- a/ql/src/test/results/clientpositive/groupby_complex_types_multi_single_reducer.q.out
+++ b/ql/src/test/results/clientpositive/groupby_complex_types_multi_single_reducer.q.out
@@ -126,7 +126,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-4
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-5
     Map Reduce
@@ -191,7 +192,8 @@ STAGE PLANS:
               name: default.dest2
 
   Stage: Stage-7
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM SRC
 INSERT OVERWRITE TABLE DEST1 SELECT ARRAY(SRC.key) as keyarray, COUNT(1) GROUP BY ARRAY(SRC.key) ORDER BY keyarray limit 10

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/groupby_cube1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/groupby_cube1.q.out b/ql/src/test/results/clientpositive/groupby_cube1.q.out
index 9acccf3..3824359 100644
--- a/ql/src/test/results/clientpositive/groupby_cube1.q.out
+++ b/ql/src/test/results/clientpositive/groupby_cube1.q.out
@@ -652,7 +652,8 @@ STAGE PLANS:
               name: default.t2
 
   Stage: Stage-4
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-5
     Map Reduce
@@ -720,7 +721,8 @@ STAGE PLANS:
               name: default.t3
 
   Stage: Stage-7
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM T1
 INSERT OVERWRITE TABLE T2 SELECT key, val, count(1) group by key, val with cube


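Note on the hunk that recurs through all of these groupby golden files: with the basic-stats task and the column-stats task merged into a single task (HIVE-16827), EXPLAIN now renders the stats stage as "Stats Work" with a "Basic Stats Work:" child instead of the old bare "Stats-Aggr Operator" line. A minimal sketch that exercises this stage, using the src/dest1 tables these tests already rely on (the sketch itself is illustrative, not part of the patch):

    EXPLAIN
    FROM src INSERT OVERWRITE TABLE dest1 SELECT count(1);
    -- Final stage of the plan before this patch:
    --   Stage: Stage-2
    --     Stats-Aggr Operator
    -- and after it:
    --   Stage: Stage-2
    --     Stats Work
    --       Basic Stats Work: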
http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/multi_insert_move_tasks_share_dependencies.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/multi_insert_move_tasks_share_dependencies.q.out b/ql/src/test/results/clientpositive/multi_insert_move_tasks_share_dependencies.q.out
index 3a8ec94..281a05a 100644
--- a/ql/src/test/results/clientpositive/multi_insert_move_tasks_share_dependencies.q.out
+++ b/ql/src/test/results/clientpositive/multi_insert_move_tasks_share_dependencies.q.out
@@ -84,7 +84,8 @@ STAGE PLANS:
               name: default.src_multi1
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-1
     Move Operator
@@ -97,7 +98,8 @@ STAGE PLANS:
               name: default.src_multi2
 
   Stage: Stage-5
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: from src
 insert overwrite table src_multi1 select * where key < 10
@@ -241,7 +243,8 @@ STAGE PLANS:
               name: default.src_multi1
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-1
     Move Operator
@@ -254,7 +257,8 @@ STAGE PLANS:
               name: default.src_multi2
 
   Stage: Stage-10
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-5
     Map Reduce
@@ -448,7 +452,8 @@ STAGE PLANS:
               name: default.src_multi1
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-1
     Move Operator
@@ -461,7 +466,8 @@ STAGE PLANS:
               name: default.src_multi2
 
   Stage: Stage-5
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: from src
 insert overwrite table src_multi1 select * where key < 10
@@ -605,7 +611,8 @@ STAGE PLANS:
               name: default.src_multi1
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-1
     Move Operator
@@ -618,7 +625,8 @@ STAGE PLANS:
               name: default.src_multi2
 
   Stage: Stage-10
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-5
     Map Reduce
@@ -825,7 +833,8 @@ STAGE PLANS:
               name: default.src_multi1
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-1
     Move Operator
@@ -838,7 +847,8 @@ STAGE PLANS:
               name: default.src_multi2
 
   Stage: Stage-5
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: from src
 insert overwrite table src_multi1 select * where key < 10 group by key, value
@@ -988,7 +998,8 @@ STAGE PLANS:
               name: default.src_multi1
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-1
     Move Operator
@@ -1001,7 +1012,8 @@ STAGE PLANS:
               name: default.src_multi2
 
   Stage: Stage-10
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-5
     Map Reduce
@@ -1201,7 +1213,8 @@ STAGE PLANS:
               name: default.src_multi1
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-1
     Move Operator
@@ -1214,7 +1227,8 @@ STAGE PLANS:
               name: default.src_multi2
 
   Stage: Stage-5
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: from src
 insert overwrite table src_multi1 select * where key < 10 group by key, value
@@ -1364,7 +1378,8 @@ STAGE PLANS:
               name: default.src_multi1
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-1
     Move Operator
@@ -1377,7 +1392,8 @@ STAGE PLANS:
               name: default.src_multi2
 
   Stage: Stage-10
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-5
     Map Reduce
@@ -1593,7 +1609,8 @@ STAGE PLANS:
               name: default.src_multi1
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-1
     Move Operator
@@ -1606,7 +1623,8 @@ STAGE PLANS:
               name: default.src_multi2
 
   Stage: Stage-5
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: from (select * from src  union all select * from src) s
 insert overwrite table src_multi1 select * where key < 10
@@ -1798,7 +1816,8 @@ STAGE PLANS:
               name: default.src_multi1
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-1
     Move Operator
@@ -1811,7 +1830,8 @@ STAGE PLANS:
               name: default.src_multi2
 
   Stage: Stage-10
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-5
     Map Reduce
@@ -2053,7 +2073,8 @@ STAGE PLANS:
               name: default.src_multi1
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-1
     Move Operator
@@ -2066,7 +2087,8 @@ STAGE PLANS:
               name: default.src_multi2
 
   Stage: Stage-5
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: from (select * from src  union all select * from src) s
 insert overwrite table src_multi1 select * where key < 10
@@ -2258,7 +2280,8 @@ STAGE PLANS:
               name: default.src_multi1
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-1
     Move Operator
@@ -2271,7 +2294,8 @@ STAGE PLANS:
               name: default.src_multi2
 
   Stage: Stage-10
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-5
     Map Reduce
@@ -3374,7 +3398,8 @@ STAGE PLANS:
               name: default.src_multi1
 
   Stage: Stage-5
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-1
     Move Operator
@@ -3387,7 +3412,8 @@ STAGE PLANS:
               name: default.src_multi2
 
   Stage: Stage-7
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-2
     Move Operator
@@ -3598,7 +3624,8 @@ STAGE PLANS:
               name: default.src_multi1
 
   Stage: Stage-5
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-1
     Move Operator
@@ -3611,7 +3638,8 @@ STAGE PLANS:
               name: default.src_multi2
 
   Stage: Stage-7
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-2
     Move Operator
@@ -3841,7 +3869,8 @@ STAGE PLANS:
               name: default.src_multi1
 
   Stage: Stage-5
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-1
     Move Operator
@@ -3854,7 +3883,8 @@ STAGE PLANS:
               name: default.src_multi2
 
   Stage: Stage-12
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-7
     Map Reduce
@@ -4153,7 +4183,8 @@ STAGE PLANS:
               name: default.src_multi1
 
   Stage: Stage-5
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-1
     Move Operator
@@ -4166,7 +4197,8 @@ STAGE PLANS:
               name: default.src_multi2
 
   Stage: Stage-12
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-7
     Map Reduce

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/multi_insert_union_src.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/multi_insert_union_src.q.out b/ql/src/test/results/clientpositive/multi_insert_union_src.q.out
index 1ff1db5..f7f5d75 100644
--- a/ql/src/test/results/clientpositive/multi_insert_union_src.q.out
+++ b/ql/src/test/results/clientpositive/multi_insert_union_src.q.out
@@ -143,7 +143,8 @@ STAGE PLANS:
               name: default.src_multi1
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-4
     Map Reduce
@@ -179,7 +180,8 @@ STAGE PLANS:
               name: default.src_multi2
 
   Stage: Stage-5
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: from (select * from src1 where key < 10 union all select * from src2 where key > 100) s
 insert overwrite table src_multi1 select key, value where key < 150 order by key

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/multi_insert_with_join2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/multi_insert_with_join2.q.out b/ql/src/test/results/clientpositive/multi_insert_with_join2.q.out
index 0231318..a554215 100644
--- a/ql/src/test/results/clientpositive/multi_insert_with_join2.q.out
+++ b/ql/src/test/results/clientpositive/multi_insert_with_join2.q.out
@@ -130,7 +130,8 @@ STAGE PLANS:
               name: default.join_result_1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 Warning: Shuffle Join JOIN[8][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product
 PREHOOK: query: explain
@@ -212,7 +213,8 @@ STAGE PLANS:
               name: default.join_result_3
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: explain
 FROM T_A a LEFT JOIN T_B b ON a.id = b.id
@@ -312,7 +314,8 @@ STAGE PLANS:
               name: default.join_result_3
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-0
     Move Operator
@@ -325,7 +328,8 @@ STAGE PLANS:
               name: default.join_result_1
 
   Stage: Stage-4
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: explain
 FROM T_A a LEFT JOIN T_B b ON a.id = b.id
@@ -425,7 +429,8 @@ STAGE PLANS:
               name: default.join_result_1
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-1
     Move Operator
@@ -438,7 +443,8 @@ STAGE PLANS:
               name: default.join_result_3
 
   Stage: Stage-4
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: explain
 FROM T_A a JOIN T_B b ON a.id = b.id
@@ -544,7 +550,8 @@ STAGE PLANS:
               name: default.join_result_1
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-1
     Move Operator
@@ -557,7 +564,8 @@ STAGE PLANS:
               name: default.join_result_3
 
   Stage: Stage-4
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: explain
 FROM T_A a JOIN T_B b ON a.id = b.id
@@ -663,7 +671,8 @@ STAGE PLANS:
               name: default.join_result_1
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-1
     Move Operator
@@ -676,7 +685,8 @@ STAGE PLANS:
               name: default.join_result_3
 
   Stage: Stage-4
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: explain
 FROM T_A a JOIN T_B b ON a.id = b.id
@@ -794,7 +804,8 @@ STAGE PLANS:
               name: default.join_result_1
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-1
     Move Operator
@@ -807,7 +818,8 @@ STAGE PLANS:
               name: default.join_result_3
 
   Stage: Stage-4
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: explain
 FROM T_A a JOIN T_B b ON a.id = b.id
@@ -925,7 +937,8 @@ STAGE PLANS:
               name: default.join_result_1
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-1
     Move Operator
@@ -938,5 +951,6 @@ STAGE PLANS:
               name: default.join_result_3
 
   Stage: Stage-4
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/multigroupby_singlemr.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/multigroupby_singlemr.q.out b/ql/src/test/results/clientpositive/multigroupby_singlemr.q.out
index 7af8c43..cd030e5 100644
--- a/ql/src/test/results/clientpositive/multigroupby_singlemr.q.out
+++ b/ql/src/test/results/clientpositive/multigroupby_singlemr.q.out
@@ -126,7 +126,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-4
     Map Reduce
@@ -169,7 +170,8 @@ STAGE PLANS:
               name: default.dest2
 
   Stage: Stage-5
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: EXPLAIN
 FROM TBL
@@ -259,7 +261,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-4
     Map Reduce
@@ -302,7 +305,8 @@ STAGE PLANS:
               name: default.dest2
 
   Stage: Stage-5
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: EXPLAIN
 FROM TBL
@@ -392,7 +396,8 @@ STAGE PLANS:
               name: default.dest3
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-4
     Map Reduce
@@ -435,7 +440,8 @@ STAGE PLANS:
               name: default.dest2
 
   Stage: Stage-5
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: EXPLAIN
 FROM TBL
@@ -522,7 +528,8 @@ STAGE PLANS:
               name: default.dest3
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-1
     Move Operator
@@ -535,7 +542,8 @@ STAGE PLANS:
               name: default.dest4
 
   Stage: Stage-4
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: EXPLAIN
 FROM TBL
@@ -646,7 +654,8 @@ STAGE PLANS:
               name: default.dest3
 
   Stage: Stage-4
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-5
     Map Reduce
@@ -689,7 +698,8 @@ STAGE PLANS:
               name: default.dest2
 
   Stage: Stage-6
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-7
     Map Reduce
@@ -732,5 +742,6 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-8
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 

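The multi-insert golden files above (multi_insert_move_tasks_share_dependencies, multi_insert_union_src, multi_insert_with_join2, multigroupby_singlemr) all shift the same way: each destination table keeps its own stats stage, so every former "Stats-Aggr Operator" stage becomes a "Stats Work" stage, one per Move Operator target. A hedged sketch of the query shape these tests use (dest1/dest2 follow the tests' naming; exact stage numbers vary by plan):

    EXPLAIN
    FROM src
    INSERT OVERWRITE TABLE dest1 SELECT key, count(1) GROUP BY key
    INSERT OVERWRITE TABLE dest2 SELECT value, count(1) GROUP BY value;
    -- One stats stage per destination, e.g.:
    --   Stage: Stage-3
    --     Stats Work
    --       Basic Stats Work:
    --   Stage: Stage-5
    --     Stats Work
    --       Basic Stats Work: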
http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/nonmr_fetch.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/nonmr_fetch.q.out b/ql/src/test/results/clientpositive/nonmr_fetch.q.out
index d2144b1..dffa80d 100644
--- a/ql/src/test/results/clientpositive/nonmr_fetch.q.out
+++ b/ql/src/test/results/clientpositive/nonmr_fetch.q.out
@@ -1090,7 +1090,8 @@ STAGE PLANS:
           name: default.srcx
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: explain analyze table src compute statistics
 PREHOOK: type: QUERY
@@ -1109,7 +1110,8 @@ STAGE PLANS:
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE
 
   Stage: Stage-1
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: explain select * from src join src src2 on src.key=src2.key
 PREHOOK: type: QUERY

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/nonreserved_keywords_insert_into1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/nonreserved_keywords_insert_into1.q.out b/ql/src/test/results/clientpositive/nonreserved_keywords_insert_into1.q.out
index d01461b..d6a8da5 100644
--- a/ql/src/test/results/clientpositive/nonreserved_keywords_insert_into1.q.out
+++ b/ql/src/test/results/clientpositive/nonreserved_keywords_insert_into1.q.out
@@ -70,7 +70,8 @@ STAGE PLANS:
               name: default.insert
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: INSERT INTO TABLE `insert` SELECT * FROM src LIMIT 100
 PREHOOK: type: QUERY
@@ -155,7 +156,8 @@ STAGE PLANS:
               name: default.insert
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: INSERT INTO TABLE `insert` SELECT * FROM src LIMIT 100
 PREHOOK: type: QUERY
@@ -249,7 +251,8 @@ STAGE PLANS:
               name: default.insert
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: INSERT OVERWRITE TABLE `insert` SELECT * FROM src LIMIT 10
 PREHOOK: type: QUERY

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/notable_alias1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/notable_alias1.q.out b/ql/src/test/results/clientpositive/notable_alias1.q.out
index 677545d..ff1778b 100644
--- a/ql/src/test/results/clientpositive/notable_alias1.q.out
+++ b/ql/src/test/results/clientpositive/notable_alias1.q.out
@@ -72,7 +72,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM src
 INSERT OVERWRITE TABLE dest1 SELECT '1234', key, count(1) WHERE src.key < 100 group by key

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/notable_alias2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/notable_alias2.q.out b/ql/src/test/results/clientpositive/notable_alias2.q.out
index 66d0b2a..2f8b0e9 100644
--- a/ql/src/test/results/clientpositive/notable_alias2.q.out
+++ b/ql/src/test/results/clientpositive/notable_alias2.q.out
@@ -72,7 +72,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM src
 INSERT OVERWRITE TABLE dest1 SELECT '1234', src.key, count(1) WHERE key < 100 group by src.key

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/nullformatCTAS.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/nullformatCTAS.q.out b/ql/src/test/results/clientpositive/nullformatCTAS.q.out
index cda0965..723a4c9 100644
--- a/ql/src/test/results/clientpositive/nullformatCTAS.q.out
+++ b/ql/src/test/results/clientpositive/nullformatCTAS.q.out
@@ -96,7 +96,8 @@ STAGE PLANS:
           name: default.null_tab3
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-3
     Map Reduce

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/optimize_filter_literal.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/optimize_filter_literal.q.out b/ql/src/test/results/clientpositive/optimize_filter_literal.q.out
index 32e2bf6..ad7e0a4 100644
--- a/ql/src/test/results/clientpositive/optimize_filter_literal.q.out
+++ b/ql/src/test/results/clientpositive/optimize_filter_literal.q.out
@@ -90,11 +90,15 @@ PREHOOK: query: analyze table tab_part partition (ds='2008-04-08') compute stati
 PREHOOK: type: QUERY
 PREHOOK: Input: default@tab_part
 PREHOOK: Input: default@tab_part@ds=2008-04-08
+PREHOOK: Output: default@tab_part
+PREHOOK: Output: default@tab_part@ds=2008-04-08
 #### A masked pattern was here ####
 POSTHOOK: query: analyze table tab_part partition (ds='2008-04-08') compute statistics for columns
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@tab_part
 POSTHOOK: Input: default@tab_part@ds=2008-04-08
+POSTHOOK: Output: default@tab_part
+POSTHOOK: Output: default@tab_part@ds=2008-04-08
 #### A masked pattern was here ####
 PREHOOK: query: CREATE TABLE tab(key int, value string) PARTITIONED BY(ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS ORCFILE
 PREHOOK: type: CREATETABLE
@@ -122,11 +126,15 @@ PREHOOK: query: analyze table tab partition (ds='2008-04-08') compute statistics
 PREHOOK: type: QUERY
 PREHOOK: Input: default@tab
 PREHOOK: Input: default@tab@ds=2008-04-08
+PREHOOK: Output: default@tab
+PREHOOK: Output: default@tab@ds=2008-04-08
 #### A masked pattern was here ####
 POSTHOOK: query: analyze table tab partition (ds='2008-04-08') compute statistics for columns
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@tab
 POSTHOOK: Input: default@tab@ds=2008-04-08
+POSTHOOK: Output: default@tab
+POSTHOOK: Output: default@tab@ds=2008-04-08
 #### A masked pattern was here ####
 Warning: Shuffle Join JOIN[12][tables = [$hdt$_0, $hdt$_1, $hdt$_2]] in Stage 'Stage-1:MAPRED' is a cross product
 PREHOOK: query: select * from

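The optimize_filter_literal hunks above show the patch's other recurring effect: because the merged task also persists column statistics to the metastore, ANALYZE ... COMPUTE STATISTICS FOR COLUMNS now registers the analyzed table and each analyzed partition as write entities, so the PREHOOK/POSTHOOK blocks gain matching "Output:" lines. A sketch, with the partition spec taken from the test above:

    ANALYZE TABLE tab_part PARTITION (ds='2008-04-08') COMPUTE STATISTICS FOR COLUMNS;
    -- Hook output now lists the targets as outputs as well as inputs:
    --   PREHOOK: Output: default@tab_part
    --   PREHOOK: Output: default@tab_part@ds=2008-04-08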
http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/orc_createas1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/orc_createas1.q.out b/ql/src/test/results/clientpositive/orc_createas1.q.out
index 506f39d..ecfad35 100644
--- a/ql/src/test/results/clientpositive/orc_createas1.q.out
+++ b/ql/src/test/results/clientpositive/orc_createas1.q.out
@@ -108,7 +108,8 @@ STAGE PLANS:
           name: default.orc_createas1b
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-3
     Merge File Operator
@@ -274,7 +275,8 @@ STAGE PLANS:
           name: default.orc_createas1c
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-3
     Merge File Operator

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/orc_merge1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/orc_merge1.q.out b/ql/src/test/results/clientpositive/orc_merge1.q.out
index a83e85b..936e93f 100644
--- a/ql/src/test/results/clientpositive/orc_merge1.q.out
+++ b/ql/src/test/results/clientpositive/orc_merge1.q.out
@@ -89,7 +89,8 @@ STAGE PLANS:
               name: default.orcfile_merge1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: INSERT OVERWRITE TABLE orcfile_merge1 PARTITION (ds='1', part)
     SELECT key, value, PMOD(HASH(key), 2) as part
@@ -173,7 +174,8 @@ STAGE PLANS:
               name: default.orcfile_merge1b
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-3
     Map Reduce
@@ -287,7 +289,8 @@ STAGE PLANS:
               name: default.orcfile_merge1c
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-3
     Merge File Operator

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/orc_merge10.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/orc_merge10.q.out b/ql/src/test/results/clientpositive/orc_merge10.q.out
index 607aaeb..94956e8 100644
--- a/ql/src/test/results/clientpositive/orc_merge10.q.out
+++ b/ql/src/test/results/clientpositive/orc_merge10.q.out
@@ -89,7 +89,8 @@ STAGE PLANS:
               name: default.orcfile_merge1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: INSERT OVERWRITE TABLE orcfile_merge1 PARTITION (ds='1', part)
     SELECT key, value, PMOD(HASH(key), 2) as part
@@ -173,7 +174,8 @@ STAGE PLANS:
               name: default.orcfile_merge1b
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-3
     Map Reduce
@@ -287,7 +289,8 @@ STAGE PLANS:
               name: default.orcfile_merge1c
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-3
     Merge File Operator
@@ -418,7 +421,8 @@ STAGE PLANS:
               name: default.orcfile_merge1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: ALTER TABLE  orcfile_merge1 PARTITION (ds='1', part='0') CONCATENATE
 PREHOOK: type: ALTER_PARTITION_MERGE

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/orc_merge2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/orc_merge2.q.out b/ql/src/test/results/clientpositive/orc_merge2.q.out
index d4c474f..5e8a1f0 100644
--- a/ql/src/test/results/clientpositive/orc_merge2.q.out
+++ b/ql/src/test/results/clientpositive/orc_merge2.q.out
@@ -78,7 +78,8 @@ STAGE PLANS:
               name: default.orcfile_merge2a
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-3
     Merge File Operator

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/orc_merge3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/orc_merge3.q.out b/ql/src/test/results/clientpositive/orc_merge3.q.out
index 7bf12c6..e527f11 100644
--- a/ql/src/test/results/clientpositive/orc_merge3.q.out
+++ b/ql/src/test/results/clientpositive/orc_merge3.q.out
@@ -104,7 +104,8 @@ STAGE PLANS:
               name: default.orcfile_merge3b
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-3
     Merge File Operator

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/orc_merge4.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/orc_merge4.q.out b/ql/src/test/results/clientpositive/orc_merge4.q.out
index 828f204..c063fc3 100644
--- a/ql/src/test/results/clientpositive/orc_merge4.q.out
+++ b/ql/src/test/results/clientpositive/orc_merge4.q.out
@@ -122,7 +122,8 @@ STAGE PLANS:
               name: default.orcfile_merge3b
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-3
     Map Reduce

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/orc_merge5.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/orc_merge5.q.out b/ql/src/test/results/clientpositive/orc_merge5.q.out
index fe24e16..069b857 100644
--- a/ql/src/test/results/clientpositive/orc_merge5.q.out
+++ b/ql/src/test/results/clientpositive/orc_merge5.q.out
@@ -66,7 +66,8 @@ STAGE PLANS:
               name: default.orc_merge5b
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: insert overwrite table orc_merge5b select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13
 PREHOOK: type: QUERY
@@ -160,7 +161,8 @@ STAGE PLANS:
               name: default.orc_merge5b
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-3
     Merge File Operator
@@ -273,7 +275,8 @@ STAGE PLANS:
               name: default.orc_merge5b
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: alter table orc_merge5b concatenate
 PREHOOK: type: ALTER_TABLE_MERGE

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/orc_merge6.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/orc_merge6.q.out b/ql/src/test/results/clientpositive/orc_merge6.q.out
index d998f4c..8d11589 100644
--- a/ql/src/test/results/clientpositive/orc_merge6.q.out
+++ b/ql/src/test/results/clientpositive/orc_merge6.q.out
@@ -69,7 +69,8 @@ STAGE PLANS:
               name: default.orc_merge5a
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: insert overwrite table orc_merge5a partition (year="2000",hour=24) select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13
 PREHOOK: type: QUERY
@@ -208,7 +209,8 @@ STAGE PLANS:
               name: default.orc_merge5a
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-3
     Merge File Operator
@@ -408,7 +410,8 @@ STAGE PLANS:
               name: default.orc_merge5a
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: alter table orc_merge5a partition(year="2000",hour=24) concatenate
 PREHOOK: type: ALTER_PARTITION_MERGE

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/orc_merge_diff_fs.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/orc_merge_diff_fs.q.out b/ql/src/test/results/clientpositive/orc_merge_diff_fs.q.out
index a83e85b..936e93f 100644
--- a/ql/src/test/results/clientpositive/orc_merge_diff_fs.q.out
+++ b/ql/src/test/results/clientpositive/orc_merge_diff_fs.q.out
@@ -89,7 +89,8 @@ STAGE PLANS:
               name: default.orcfile_merge1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: INSERT OVERWRITE TABLE orcfile_merge1 PARTITION (ds='1', part)
     SELECT key, value, PMOD(HASH(key), 2) as part
@@ -173,7 +174,8 @@ STAGE PLANS:
               name: default.orcfile_merge1b
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-3
     Map Reduce
@@ -287,7 +289,8 @@ STAGE PLANS:
               name: default.orcfile_merge1c
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-3
     Merge File Operator

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/orc_merge_incompat1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/orc_merge_incompat1.q.out b/ql/src/test/results/clientpositive/orc_merge_incompat1.q.out
index aac4d15..01c9df8 100644
--- a/ql/src/test/results/clientpositive/orc_merge_incompat1.q.out
+++ b/ql/src/test/results/clientpositive/orc_merge_incompat1.q.out
@@ -65,7 +65,8 @@ STAGE PLANS:
               name: default.orc_merge5b
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: insert overwrite table orc_merge5b select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13
 PREHOOK: type: QUERY

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/orc_merge_incompat2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/orc_merge_incompat2.q.out b/ql/src/test/results/clientpositive/orc_merge_incompat2.q.out
index 4b36e97..86d5d3c 100644
--- a/ql/src/test/results/clientpositive/orc_merge_incompat2.q.out
+++ b/ql/src/test/results/clientpositive/orc_merge_incompat2.q.out
@@ -64,7 +64,8 @@ STAGE PLANS:
               name: default.orc_merge5a
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: insert overwrite table orc_merge5a partition (st) select userid,string1,subtype,decimal1,ts,subtype from orc_merge5 order by userid
 PREHOOK: type: QUERY
@@ -277,7 +278,8 @@ STAGE PLANS:
               name: default.orc_merge5a
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: alter table orc_merge5a partition(st=80.0) concatenate
 PREHOOK: type: ALTER_PARTITION_MERGE

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/outer_reference_windowed.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/outer_reference_windowed.q.out b/ql/src/test/results/clientpositive/outer_reference_windowed.q.out
index 06d8145..aef80c5 100644
--- a/ql/src/test/results/clientpositive/outer_reference_windowed.q.out
+++ b/ql/src/test/results/clientpositive/outer_reference_windowed.q.out
@@ -93,27 +93,67 @@ POSTHOOK: Lineage: e011_03.c2 SIMPLE [(e011_01)e011_01.FieldSchema(name:c2, type
 PREHOOK: query: ANALYZE TABLE e011_01 COMPUTE STATISTICS FOR COLUMNS
 PREHOOK: type: QUERY
 PREHOOK: Input: default@e011_01
+PREHOOK: Output: default@e011_01
 #### A masked pattern was here ####
 POSTHOOK: query: ANALYZE TABLE e011_01 COMPUTE STATISTICS FOR COLUMNS
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@e011_01
+POSTHOOK: Output: default@e011_01
 #### A masked pattern was here ####
 PREHOOK: query: ANALYZE TABLE e011_02 COMPUTE STATISTICS FOR COLUMNS
 PREHOOK: type: QUERY
 PREHOOK: Input: default@e011_02
+PREHOOK: Output: default@e011_02
 #### A masked pattern was here ####
 POSTHOOK: query: ANALYZE TABLE e011_02 COMPUTE STATISTICS FOR COLUMNS
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@e011_02
+POSTHOOK: Output: default@e011_02
 #### A masked pattern was here ####
 PREHOOK: query: ANALYZE TABLE e011_03 COMPUTE STATISTICS FOR COLUMNS
 PREHOOK: type: QUERY
 PREHOOK: Input: default@e011_03
+PREHOOK: Output: default@e011_03
 #### A masked pattern was here ####
 POSTHOOK: query: ANALYZE TABLE e011_03 COMPUTE STATISTICS FOR COLUMNS
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@e011_03
+POSTHOOK: Output: default@e011_03
+#### A masked pattern was here ####
+PREHOOK: query: describe formatted e011_01
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@e011_01
+POSTHOOK: query: describe formatted e011_01
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@e011_01
+# col_name            	data_type           	comment             
+c1                  	decimal(15,2)       	                    
+c2                  	decimal(15,2)       	                    
+	 	 
+# Detailed Table Information	 	 
+Database:           	default             	 
+#### A masked pattern was here ####
+Retention:          	0                   	 
+#### A masked pattern was here ####
+Table Type:         	MANAGED_TABLE       	 
+Table Parameters:	 	 
+	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"c1\":\"true\",\"c2\":\"true\"}}
+	numFiles            	1                   
+	numRows             	4                   
+	rawDataSize         	12                  
+	totalSize           	16                  
 #### A masked pattern was here ####
+	 	 
+# Storage Information	 	 
+SerDe Library:      	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe	 
+InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
+OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
+Compressed:         	No                  	 
+Num Buckets:        	-1                  	 
+Bucket Columns:     	[]                  	 
+Sort Columns:       	[]                  	 
+Storage Desc Params:	 	 
+	serialization.format	1                   
 PREHOOK: query: explain select sum(sum(c1)) over() from e011_01
 PREHOOK: type: QUERY
 POSTHOOK: query: explain select sum(sum(c1)) over() from e011_01
@@ -129,11 +169,11 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: e011_01
-            Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 4 Data size: 12 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: c1 (type: decimal(15,2))
               outputColumnNames: c1
-              Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 4 Data size: 12 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
                 aggregations: sum(c1)
                 mode: hash
@@ -239,22 +279,22 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: e011_01
-            Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 4 Data size: 12 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: c1 (type: decimal(15,2)), c2 (type: decimal(15,2))
               outputColumnNames: c1, c2
-              Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 4 Data size: 12 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
                 aggregations: sum(c1)
                 keys: c1 (type: decimal(15,2)), c2 (type: decimal(15,2))
                 mode: hash
                 outputColumnNames: _col0, _col1, _col2
-                Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 4 Data size: 12 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
                   key expressions: _col0 (type: decimal(15,2)), _col1 (type: decimal(15,2))
                   sort order: ++
                   Map-reduce partition columns: _col0 (type: decimal(15,2)), _col1 (type: decimal(15,2))
-                  Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 4 Data size: 12 Basic stats: COMPLETE Column stats: NONE
                   value expressions: _col2 (type: decimal(25,2))
       Reduce Operator Tree:
         Group By Operator
@@ -262,7 +302,7 @@ STAGE PLANS:
           keys: KEY._col0 (type: decimal(15,2)), KEY._col1 (type: decimal(15,2))
           mode: mergepartial
           outputColumnNames: _col0, _col1, _col2
-          Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
+          Statistics: Num rows: 2 Data size: 6 Basic stats: COMPLETE Column stats: NONE
           File Output Operator
             compressed: false
             table:
@@ -278,13 +318,13 @@ STAGE PLANS:
               key expressions: _col1 (type: decimal(15,2)), _col0 (type: decimal(15,2))
               sort order: ++
               Map-reduce partition columns: _col1 (type: decimal(15,2))
-              Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 2 Data size: 6 Basic stats: COMPLETE Column stats: NONE
               value expressions: _col2 (type: decimal(25,2))
       Reduce Operator Tree:
         Select Operator
           expressions: KEY.reducesinkkey1 (type: decimal(15,2)), KEY.reducesinkkey0 (type: decimal(15,2)), VALUE._col0 (type: decimal(25,2))
           outputColumnNames: _col0, _col1, _col2
-          Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
+          Statistics: Num rows: 2 Data size: 6 Basic stats: COMPLETE Column stats: NONE
           PTF Operator
             Function definitions:
                 Input definition
@@ -304,14 +344,14 @@ STAGE PLANS:
                         name: sum
                         window function: GenericUDAFSumHiveDecimal
                         window frame: RANGE PRECEDING(MAX)~CURRENT
-            Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 2 Data size: 6 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: sum_window_0 (type: decimal(35,2))
               outputColumnNames: _col0
-              Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 2 Data size: 6 Basic stats: COMPLETE Column stats: NONE
               File Output Operator
                 compressed: false
-                Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 2 Data size: 6 Basic stats: COMPLETE Column stats: NONE
                 table:
                     input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                     output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@@ -365,19 +405,19 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: e011_01
-            Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 4 Data size: 12 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
               predicate: c1 is not null (type: boolean)
-              Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 4 Data size: 12 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: c1 (type: decimal(15,2)), c2 (type: decimal(15,2))
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 4 Data size: 12 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
                   key expressions: _col0 (type: decimal(15,2))
                   sort order: +
                   Map-reduce partition columns: _col0 (type: decimal(15,2))
-                  Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 4 Data size: 12 Basic stats: COMPLETE Column stats: NONE
                   value expressions: _col1 (type: decimal(15,2))
           TableScan
             alias: e011_03
@@ -402,13 +442,13 @@ STAGE PLANS:
             0 _col0 (type: decimal(15,2))
             1 _col0 (type: decimal(15,2))
           outputColumnNames: _col0, _col1
-          Statistics: Num rows: 4 Data size: 39 Basic stats: COMPLETE Column stats: NONE
+          Statistics: Num rows: 4 Data size: 13 Basic stats: COMPLETE Column stats: NONE
           Group By Operator
             aggregations: sum(_col0)
             keys: _col0 (type: decimal(15,2)), _col1 (type: decimal(15,2))
             mode: hash
             outputColumnNames: _col0, _col1, _col2
-            Statistics: Num rows: 4 Data size: 39 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 4 Data size: 13 Basic stats: COMPLETE Column stats: NONE
             File Output Operator
               compressed: false
               table:
@@ -424,7 +464,7 @@ STAGE PLANS:
               key expressions: _col0 (type: decimal(15,2)), _col1 (type: decimal(15,2))
               sort order: ++
               Map-reduce partition columns: _col0 (type: decimal(15,2)), _col1 (type: decimal(15,2))
-              Statistics: Num rows: 4 Data size: 39 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 4 Data size: 13 Basic stats: COMPLETE Column stats: NONE
               value expressions: _col2 (type: decimal(25,2))
       Reduce Operator Tree:
         Group By Operator
@@ -432,7 +472,7 @@ STAGE PLANS:
           keys: KEY._col0 (type: decimal(15,2)), KEY._col1 (type: decimal(15,2))
           mode: mergepartial
           outputColumnNames: _col0, _col1, _col2
-          Statistics: Num rows: 2 Data size: 19 Basic stats: COMPLETE Column stats: NONE
+          Statistics: Num rows: 2 Data size: 6 Basic stats: COMPLETE Column stats: NONE
           File Output Operator
             compressed: false
             table:
@@ -448,13 +488,13 @@ STAGE PLANS:
               key expressions: _col1 (type: decimal(15,2)), _col0 (type: decimal(15,2))
               sort order: ++
               Map-reduce partition columns: _col1 (type: decimal(15,2))
-              Statistics: Num rows: 2 Data size: 19 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 2 Data size: 6 Basic stats: COMPLETE Column stats: NONE
               value expressions: _col2 (type: decimal(25,2))
       Reduce Operator Tree:
         Select Operator
           expressions: KEY.reducesinkkey1 (type: decimal(15,2)), KEY.reducesinkkey0 (type: decimal(15,2)), VALUE._col0 (type: decimal(25,2))
           outputColumnNames: _col0, _col1, _col2
-          Statistics: Num rows: 2 Data size: 19 Basic stats: COMPLETE Column stats: NONE
+          Statistics: Num rows: 2 Data size: 6 Basic stats: COMPLETE Column stats: NONE
           PTF Operator
             Function definitions:
                 Input definition
@@ -474,14 +514,14 @@ STAGE PLANS:
                         name: sum
                         window function: GenericUDAFSumHiveDecimal
                         window frame: RANGE PRECEDING(MAX)~CURRENT
-            Statistics: Num rows: 2 Data size: 19 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 2 Data size: 6 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: sum_window_0 (type: decimal(35,2))
               outputColumnNames: _col0
-              Statistics: Num rows: 2 Data size: 19 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 2 Data size: 6 Basic stats: COMPLETE Column stats: NONE
               File Output Operator
                 compressed: false
-                Statistics: Num rows: 2 Data size: 19 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 2 Data size: 6 Basic stats: COMPLETE Column stats: NONE
                 table:
                     input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                     output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@@ -555,19 +595,19 @@ STAGE PLANS:
                   value expressions: _col1 (type: decimal(15,2))
           TableScan
             alias: e011_01
-            Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 4 Data size: 12 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
               predicate: c1 is not null (type: boolean)
-              Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 4 Data size: 12 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: c1 (type: decimal(15,2))
                 outputColumnNames: _col0
-                Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 4 Data size: 12 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
                   key expressions: _col0 (type: decimal(15,2))
                   sort order: +
                   Map-reduce partition columns: _col0 (type: decimal(15,2))
-                  Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 4 Data size: 12 Basic stats: COMPLETE Column stats: NONE
       Reduce Operator Tree:
         Join Operator
           condition map:
@@ -712,19 +752,19 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: e011_01
-            Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 4 Data size: 12 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
               predicate: c1 is not null (type: boolean)
-              Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 4 Data size: 12 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: c1 (type: decimal(15,2)), c2 (type: decimal(15,2))
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 4 Data size: 12 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
                   key expressions: _col0 (type: decimal(15,2))
                   sort order: +
                   Map-reduce partition columns: _col0 (type: decimal(15,2))
-                  Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 4 Data size: 12 Basic stats: COMPLETE Column stats: NONE
                   value expressions: _col1 (type: decimal(15,2))
           TableScan
             alias: e011_03
@@ -750,13 +790,13 @@ STAGE PLANS:
             0 _col0 (type: decimal(15,2))
             1 _col0 (type: decimal(15,2))
           outputColumnNames: _col0, _col1, _col2, _col3
-          Statistics: Num rows: 4 Data size: 39 Basic stats: COMPLETE Column stats: NONE
+          Statistics: Num rows: 4 Data size: 13 Basic stats: COMPLETE Column stats: NONE
           Group By Operator
             aggregations: corr(_col0, _col2)
             keys: _col1 (type: decimal(15,2)), _col3 (type: decimal(15,2))
             mode: hash
             outputColumnNames: _col0, _col1, _col2
-            Statistics: Num rows: 4 Data size: 39 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 4 Data size: 13 Basic stats: COMPLETE Column stats: NONE
             File Output Operator
               compressed: false
               table:
@@ -772,7 +812,7 @@ STAGE PLANS:
               key expressions: _col0 (type: decimal(15,2)), _col1 (type: decimal(15,2))
               sort order: ++
               Map-reduce partition columns: _col0 (type: decimal(15,2))
-              Statistics: Num rows: 4 Data size: 39 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 4 Data size: 13 Basic stats: COMPLETE Column stats: NONE
               value expressions: _col2 (type: struct<count:bigint,xavg:double,yavg:double,xvar:double,yvar:double,covar:double>)
       Reduce Operator Tree:
         Group By Operator
@@ -780,11 +820,11 @@ STAGE PLANS:
           keys: KEY._col0 (type: decimal(15,2)), KEY._col1 (type: decimal(15,2))
           mode: mergepartial
           outputColumnNames: _col0, _col1, _col2
-          Statistics: Num rows: 2 Data size: 19 Basic stats: COMPLETE Column stats: NONE
+          Statistics: Num rows: 2 Data size: 6 Basic stats: COMPLETE Column stats: NONE
           Select Operator
             expressions: _col1 (type: decimal(15,2)), _col0 (type: decimal(15,2)), _col2 (type: double)
             outputColumnNames: _col0, _col1, _col2
-            Statistics: Num rows: 2 Data size: 19 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 2 Data size: 6 Basic stats: COMPLETE Column stats: NONE
             PTF Operator
               Function definitions:
                   Input definition
@@ -804,14 +844,14 @@ STAGE PLANS:
                           name: sum
                           window function: GenericUDAFSumDouble
                           window frame: RANGE PRECEDING(MAX)~CURRENT
-              Statistics: Num rows: 2 Data size: 19 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 2 Data size: 6 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: sum_window_0 (type: double)
                 outputColumnNames: _col0
-                Statistics: Num rows: 2 Data size: 19 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 2 Data size: 6 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
-                  Statistics: Num rows: 2 Data size: 19 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 2 Data size: 6 Basic stats: COMPLETE Column stats: NONE
                   table:
                       input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                       output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat

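The describe output at the top of this hunk shows basic and column statistics landing in Table Parameters together once the merged stats task has run. A minimal sketch of surfacing that state (table and column names taken from the test above; session defaults assumed):

ANALYZE TABLE e011_01 COMPUTE STATISTICS FOR COLUMNS;
-- With the merged task, this single statement also aggregates basic stats,
-- so DESCRIBE FORMATTED reports both kinds at once:
--   COLUMN_STATS_ACCURATE  {"BASIC_STATS":"true","COLUMN_STATS":{"c1":"true","c2":"true"}}
--   numRows 4, rawDataSize 12, totalSize 16
DESCRIBE FORMATTED e011_01;
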
http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/parallel.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/parallel.q.out b/ql/src/test/results/clientpositive/parallel.q.out
index 459105e..31b1a0d 100644
--- a/ql/src/test/results/clientpositive/parallel.q.out
+++ b/ql/src/test/results/clientpositive/parallel.q.out
@@ -116,7 +116,8 @@ STAGE PLANS:
               name: default.src_a
 
   Stage: Stage-4
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-1
     Move Operator
@@ -129,7 +130,8 @@ STAGE PLANS:
               name: default.src_b
 
   Stage: Stage-5
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: from (select key, value from src group by key, value) s
 insert overwrite table src_a select s.key, s.value group by s.key, s.value

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/parallel_colstats.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/parallel_colstats.q.out b/ql/src/test/results/clientpositive/parallel_colstats.q.out
index 83fc14a..12ad5ee 100644
--- a/ql/src/test/results/clientpositive/parallel_colstats.q.out
+++ b/ql/src/test/results/clientpositive/parallel_colstats.q.out
@@ -28,12 +28,10 @@ STAGE DEPENDENCIES:
   Stage-2 is a root stage
   Stage-3 depends on stages: Stage-2
   Stage-0 depends on stages: Stage-3
-  Stage-4 depends on stages: Stage-0
-  Stage-8 depends on stages: Stage-4, Stage-5, Stage-6, Stage-7
-  Stage-9 depends on stages: Stage-4, Stage-5, Stage-6, Stage-7
+  Stage-4 depends on stages: Stage-0, Stage-5, Stage-7
   Stage-5 depends on stages: Stage-3
+  Stage-6 depends on stages: Stage-1, Stage-5, Stage-7
   Stage-1 depends on stages: Stage-3
-  Stage-6 depends on stages: Stage-1
   Stage-7 depends on stages: Stage-3
 
 STAGE PLANS:
@@ -150,22 +148,13 @@ STAGE PLANS:
               name: default.src_a
 
   Stage: Stage-4
-    Stats-Aggr Operator
-
-  Stage: Stage-8
-    Column Stats Work
+    Stats Work
+      Basic Stats Work:
       Column Stats Desc:
           Columns: key, value
           Column Types: string, string
           Table: default.src_a
 
-  Stage: Stage-9
-    Column Stats Work
-      Column Stats Desc:
-          Columns: key, value
-          Column Types: string, string
-          Table: default.src_b
-
   Stage: Stage-5
     Map Reduce
       Map Operator Tree:
@@ -188,6 +177,14 @@ STAGE PLANS:
                 output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
                 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
 
+  Stage: Stage-6
+    Stats Work
+      Basic Stats Work:
+      Column Stats Desc:
+          Columns: key, value
+          Column Types: string, string
+          Table: default.src_b
+
   Stage: Stage-1
     Move Operator
       tables:
@@ -198,9 +195,6 @@ STAGE PLANS:
               serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
               name: default.src_b
 
-  Stage: Stage-6
-    Stats-Aggr Operator
-
   Stage: Stage-7
     Map Reduce
       Map Operator Tree:

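The reshuffled stage dependencies above collapse the separate basic-stats and Column Stats Work stages into one Stats Work stage per target table. A sketch of the settings and query shape that exercise this plan (the exact q-file contents are an assumption; the second INSERT branch is reconstructed from the src_b references above):

set hive.exec.parallel=true;            -- run independent stages concurrently
set hive.stats.column.autogather=true;  -- gather column stats during the insert
explain
from (select key, value from src group by key, value) s
insert overwrite table src_a select s.key, s.value group by s.key, s.value
insert overwrite table src_b select s.key, s.value group by s.key, s.value;
-- Each target table now gets a single stage of the form:
--   Stats Work
--     Basic Stats Work:
--     Column Stats Desc: ...
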
http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/parallel_join1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/parallel_join1.q.out b/ql/src/test/results/clientpositive/parallel_join1.q.out
index 8843661..0450e15 100644
--- a/ql/src/test/results/clientpositive/parallel_join1.q.out
+++ b/ql/src/test/results/clientpositive/parallel_join1.q.out
@@ -87,7 +87,8 @@ STAGE PLANS:
               name: default.dest_j1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM src src1 JOIN src src2 ON (src1.key = src2.key)
 INSERT OVERWRITE TABLE dest_j1 SELECT src1.key, src2.value

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/parallel_orderby.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/parallel_orderby.q.out b/ql/src/test/results/clientpositive/parallel_orderby.q.out
index 699dc16..3a80dfc 100644
--- a/ql/src/test/results/clientpositive/parallel_orderby.q.out
+++ b/ql/src/test/results/clientpositive/parallel_orderby.q.out
@@ -79,7 +79,8 @@ STAGE PLANS:
           name: default.total_ordered
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: create table total_ordered as select * from src5 order by key, value
 PREHOOK: type: CREATETABLE_AS_SELECT

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/partial_column_stats.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/partial_column_stats.q.out b/ql/src/test/results/clientpositive/partial_column_stats.q.out
index c1a78c3..44db81a 100644
--- a/ql/src/test/results/clientpositive/partial_column_stats.q.out
+++ b/ql/src/test/results/clientpositive/partial_column_stats.q.out
@@ -49,7 +49,8 @@ STAGE PLANS:
                 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
 
   Stage: Stage-1
-    Column Stats Work
+    Stats Work
+      Basic Stats Work:
       Column Stats Desc:
           Columns: key, value
           Column Types: int, string
@@ -58,10 +59,12 @@ STAGE PLANS:
 PREHOOK: query: analyze table t1 compute statistics for columns
 PREHOOK: type: QUERY
 PREHOOK: Input: default@t1
+PREHOOK: Output: default@t1
 #### A masked pattern was here ####
 POSTHOOK: query: analyze table t1 compute statistics for columns
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@t1
+POSTHOOK: Output: default@t1
 #### A masked pattern was here ####
 PREHOOK: query: desc formatted t1 value
 PREHOOK: type: DESCTABLE

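As the hunk above shows, a column-stats ANALYZE now plans one Stats Work stage that carries the Column Stats Desc, and the analyzed table is registered as an Output entity alongside the Input. A minimal repro sketch (column list matches the test table; session defaults assumed):

explain analyze table t1 compute statistics for columns;
-- Stage-1 now prints:
--   Stats Work
--     Basic Stats Work:
--     Column Stats Desc:
--         Columns: key, value
--         Column Types: int, string
--         Table: default.t1
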
http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/partition_coltype_literals.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/partition_coltype_literals.q.out b/ql/src/test/results/clientpositive/partition_coltype_literals.q.out
index 9a0b4f4..adabbb2 100644
--- a/ql/src/test/results/clientpositive/partition_coltype_literals.q.out
+++ b/ql/src/test/results/clientpositive/partition_coltype_literals.q.out
@@ -281,11 +281,15 @@ PREHOOK: query: analyze table partcoltypenum partition (tint=110Y, sint=22000S,
 PREHOOK: type: QUERY
 PREHOOK: Input: default@partcoltypenum
 PREHOOK: Input: default@partcoltypenum@tint=110/sint=22000/bint=330000000000
+PREHOOK: Output: default@partcoltypenum
+PREHOOK: Output: default@partcoltypenum@tint=110/sint=22000/bint=330000000000
 #### A masked pattern was here ####
 POSTHOOK: query: analyze table partcoltypenum partition (tint=110Y, sint=22000S, bint=330000000000L) compute statistics for columns
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@partcoltypenum
 POSTHOOK: Input: default@partcoltypenum@tint=110/sint=22000/bint=330000000000
+POSTHOOK: Output: default@partcoltypenum
+POSTHOOK: Output: default@partcoltypenum@tint=110/sint=22000/bint=330000000000
 #### A masked pattern was here ####
 PREHOOK: query: describe formatted partcoltypenum partition (tint=110Y, sint=22000S, bint=330000000000L) key
 PREHOOK: type: DESCTABLE

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/pcr.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/pcr.q.out b/ql/src/test/results/clientpositive/pcr.q.out
index 691048c..3f69f8b 100644
--- a/ql/src/test/results/clientpositive/pcr.q.out
+++ b/ql/src/test/results/clientpositive/pcr.q.out
@@ -3649,7 +3649,8 @@ STAGE PLANS:
               name: default.pcr_t2
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
   Stage: Stage-4
@@ -3870,7 +3871,8 @@ STAGE PLANS:
               name: default.pcr_t3
 
   Stage: Stage-9
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
   Stage: Stage-10
@@ -4279,7 +4281,8 @@ STAGE PLANS:
               name: default.pcr_t2
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
   Stage: Stage-4
@@ -4500,7 +4503,8 @@ STAGE PLANS:
               name: default.pcr_t3
 
   Stage: Stage-9
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
   Stage: Stage-10

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/pcs.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/pcs.q.out b/ql/src/test/results/clientpositive/pcs.q.out
index cc6ba5a..bbda382 100644
--- a/ql/src/test/results/clientpositive/pcs.q.out
+++ b/ql/src/test/results/clientpositive/pcs.q.out
@@ -70,6 +70,10 @@ PREHOOK: Input: default@pcs_t1
 PREHOOK: Input: default@pcs_t1@ds=2000-04-08
 PREHOOK: Input: default@pcs_t1@ds=2000-04-09
 PREHOOK: Input: default@pcs_t1@ds=2000-04-10
+PREHOOK: Output: default@pcs_t1
+PREHOOK: Output: default@pcs_t1@ds=2000-04-08
+PREHOOK: Output: default@pcs_t1@ds=2000-04-09
+PREHOOK: Output: default@pcs_t1@ds=2000-04-10
 #### A masked pattern was here ####
 POSTHOOK: query: analyze table pcs_t1 partition(ds) compute statistics for columns
 POSTHOOK: type: QUERY
@@ -77,6 +81,10 @@ POSTHOOK: Input: default@pcs_t1
 POSTHOOK: Input: default@pcs_t1@ds=2000-04-08
 POSTHOOK: Input: default@pcs_t1@ds=2000-04-09
 POSTHOOK: Input: default@pcs_t1@ds=2000-04-10
+POSTHOOK: Output: default@pcs_t1
+POSTHOOK: Output: default@pcs_t1@ds=2000-04-08
+POSTHOOK: Output: default@pcs_t1@ds=2000-04-09
+POSTHOOK: Output: default@pcs_t1@ds=2000-04-10
 #### A masked pattern was here ####
 PREHOOK: query: explain extended select key, value, ds from pcs_t1 where (ds='2000-04-08' and key=1) or (ds='2000-04-09' and key=2) order by key, value, ds
 PREHOOK: type: QUERY

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/ppd_constant_expr.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/ppd_constant_expr.q.out b/ql/src/test/results/clientpositive/ppd_constant_expr.q.out
index 494a1e2..e118a4e 100644
--- a/ql/src/test/results/clientpositive/ppd_constant_expr.q.out
+++ b/ql/src/test/results/clientpositive/ppd_constant_expr.q.out
@@ -64,7 +64,8 @@ STAGE PLANS:
               name: default.ppd_constant_expr
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-3
     Map Reduce
@@ -200,7 +201,8 @@ STAGE PLANS:
               name: default.ppd_constant_expr
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-3
     Map Reduce

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/ppd_multi_insert.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/ppd_multi_insert.q.out b/ql/src/test/results/clientpositive/ppd_multi_insert.q.out
index ab50ea1..60b17ec 100644
--- a/ql/src/test/results/clientpositive/ppd_multi_insert.q.out
+++ b/ql/src/test/results/clientpositive/ppd_multi_insert.q.out
@@ -161,7 +161,8 @@ STAGE PLANS:
               name: default.mi1
 
   Stage: Stage-5
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-1
     Move Operator
@@ -174,7 +175,8 @@ STAGE PLANS:
               name: default.mi2
 
   Stage: Stage-6
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-2
     Move Operator
@@ -190,7 +192,8 @@ STAGE PLANS:
               name: default.mi3
 
   Stage: Stage-7
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-3
     Move Operator
@@ -1426,7 +1429,8 @@ STAGE PLANS:
               name: default.mi1
 
   Stage: Stage-5
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-1
     Move Operator
@@ -1439,7 +1443,8 @@ STAGE PLANS:
               name: default.mi2
 
   Stage: Stage-6
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-2
     Move Operator
@@ -1455,7 +1460,8 @@ STAGE PLANS:
               name: default.mi3
 
   Stage: Stage-7
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-3
     Move Operator

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/quote1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/quote1.q.out b/ql/src/test/results/clientpositive/quote1.q.out
index f873dc3..4a4fbd0 100644
--- a/ql/src/test/results/clientpositive/quote1.q.out
+++ b/ql/src/test/results/clientpositive/quote1.q.out
@@ -69,7 +69,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-3
     Map Reduce

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/quotedid_stats.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/quotedid_stats.q.out b/ql/src/test/results/clientpositive/quotedid_stats.q.out
index 418f7f2..b82543e 100644
--- a/ql/src/test/results/clientpositive/quotedid_stats.q.out
+++ b/ql/src/test/results/clientpositive/quotedid_stats.q.out
@@ -43,10 +43,12 @@ Storage Desc Params:
 PREHOOK: query: analyze table t4 compute statistics for columns
 PREHOOK: type: QUERY
 PREHOOK: Input: default@t4
+PREHOOK: Output: default@t4
 #### A masked pattern was here ####
 POSTHOOK: query: analyze table t4 compute statistics for columns
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@t4
+POSTHOOK: Output: default@t4
 #### A masked pattern was here ####
 PREHOOK: query: describe formatted t4
 PREHOOK: type: DESCTABLE

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/rand_partitionpruner2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/rand_partitionpruner2.q.out b/ql/src/test/results/clientpositive/rand_partitionpruner2.q.out
index 9b29136..1482fde 100644
--- a/ql/src/test/results/clientpositive/rand_partitionpruner2.q.out
+++ b/ql/src/test/results/clientpositive/rand_partitionpruner2.q.out
@@ -213,7 +213,8 @@ STAGE PLANS:
               name: default.tmptable
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
   Stage: Stage-3

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/rcfile_null_value.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/rcfile_null_value.q.out b/ql/src/test/results/clientpositive/rcfile_null_value.q.out
index c6a4a18..e2a8ae0 100644
--- a/ql/src/test/results/clientpositive/rcfile_null_value.q.out
+++ b/ql/src/test/results/clientpositive/rcfile_null_value.q.out
@@ -161,7 +161,8 @@ STAGE PLANS:
               name: default.dest1_rc
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM (
  FROM 

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/remove_exprs_stats.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/remove_exprs_stats.q.out b/ql/src/test/results/clientpositive/remove_exprs_stats.q.out
index c256a5c..8f72a46 100644
--- a/ql/src/test/results/clientpositive/remove_exprs_stats.q.out
+++ b/ql/src/test/results/clientpositive/remove_exprs_stats.q.out
@@ -55,10 +55,12 @@ POSTHOOK: Lineage: loc_orc.zip SIMPLE [(loc_staging)loc_staging.FieldSchema(name
 PREHOOK: query: analyze table loc_orc compute statistics for columns state,locid,zip,year
 PREHOOK: type: QUERY
 PREHOOK: Input: default@loc_orc
+PREHOOK: Output: default@loc_orc
 #### A masked pattern was here ####
 POSTHOOK: query: analyze table loc_orc compute statistics for columns state,locid,zip,year
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@loc_orc
+POSTHOOK: Output: default@loc_orc
 #### A masked pattern was here ####
 PREHOOK: query: explain select * from loc_orc where locid < 30
 PREHOOK: type: QUERY
@@ -594,10 +596,12 @@ POSTHOOK: Lineage: t.s SIMPLE [(values__tmp__table__1)values__tmp__table__1.Fiel
 PREHOOK: query: analyze table t compute statistics for columns s
 PREHOOK: type: QUERY
 PREHOOK: Input: default@t
+PREHOOK: Output: default@t
 #### A masked pattern was here ####
 POSTHOOK: query: analyze table t compute statistics for columns s
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@t
+POSTHOOK: Output: default@t
 #### A masked pattern was here ####
 PREHOOK: query: explain select * from t where s is null
 PREHOOK: type: QUERY
@@ -723,10 +727,12 @@ POSTHOOK: Lineage: t.s SIMPLE [(values__tmp__table__2)values__tmp__table__2.Fiel
 PREHOOK: query: analyze table t compute statistics for columns s
 PREHOOK: type: QUERY
 PREHOOK: Input: default@t
+PREHOOK: Output: default@t
 #### A masked pattern was here ####
 POSTHOOK: query: analyze table t compute statistics for columns s
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@t
+POSTHOOK: Output: default@t
 #### A masked pattern was here ####
 PREHOOK: query: explain select * from t where s is not null
 PREHOOK: type: QUERY

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/rename_external_partition_location.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/rename_external_partition_location.q.out b/ql/src/test/results/clientpositive/rename_external_partition_location.q.out
index def0aae..8e3c177 100644
--- a/ql/src/test/results/clientpositive/rename_external_partition_location.q.out
+++ b/ql/src/test/results/clientpositive/rename_external_partition_location.q.out
@@ -69,11 +69,15 @@ PREHOOK: query: ANALYZE TABLE ex_table PARTITION (part='part1') COMPUTE STATISTI
 PREHOOK: type: QUERY
 PREHOOK: Input: default@ex_table
 PREHOOK: Input: default@ex_table@part=part1
+PREHOOK: Output: default@ex_table
+PREHOOK: Output: default@ex_table@part=part1
 #### A masked pattern was here ####
 POSTHOOK: query: ANALYZE TABLE ex_table PARTITION (part='part1') COMPUTE STATISTICS FOR COLUMNS
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@ex_table
 POSTHOOK: Input: default@ex_table@part=part1
+POSTHOOK: Output: default@ex_table
+POSTHOOK: Output: default@ex_table@part=part1
 #### A masked pattern was here ####
 PREHOOK: query: DESCRIBE FORMATTED ex_table
 PREHOOK: type: DESCTABLE

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/rename_table_update_column_stats.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/rename_table_update_column_stats.q.out b/ql/src/test/results/clientpositive/rename_table_update_column_stats.q.out
index e29bba4..b0f9069 100644
--- a/ql/src/test/results/clientpositive/rename_table_update_column_stats.q.out
+++ b/ql/src/test/results/clientpositive/rename_table_update_column_stats.q.out
@@ -47,10 +47,12 @@ PREHOOK: query: analyze table testtable1 compute statistics for columns
 PREHOOK: type: QUERY
 PREHOOK: Input: statsdb1@testtable1
 #### A masked pattern was here ####
+PREHOOK: Output: statsdb1@testtable1
 POSTHOOK: query: analyze table testtable1 compute statistics for columns
 POSTHOOK: type: QUERY
 POSTHOOK: Input: statsdb1@testtable1
 #### A masked pattern was here ####
+POSTHOOK: Output: statsdb1@testtable1
 PREHOOK: query: describe formatted statsdb1.testtable1 col1
 PREHOOK: type: DESCTABLE
 PREHOOK: Input: statsdb1@testtable1
@@ -247,10 +249,12 @@ PREHOOK: query: analyze table testtable1 compute statistics for columns
 PREHOOK: type: QUERY
 PREHOOK: Input: statsdb1@testtable1
 #### A masked pattern was here ####
+PREHOOK: Output: statsdb1@testtable1
 POSTHOOK: query: analyze table testtable1 compute statistics for columns
 POSTHOOK: type: QUERY
 POSTHOOK: Input: statsdb1@testtable1
 #### A masked pattern was here ####
+POSTHOOK: Output: statsdb1@testtable1
 PREHOOK: query: describe formatted statsdb1.testtable1 col1
 PREHOOK: type: DESCTABLE
 PREHOOK: Input: statsdb1@testtable1

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/sample1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/sample1.q.out b/ql/src/test/results/clientpositive/sample1.q.out
index dec9b23..72c32b4 100644
--- a/ql/src/test/results/clientpositive/sample1.q.out
+++ b/ql/src/test/results/clientpositive/sample1.q.out
@@ -166,7 +166,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
   Stage: Stage-3

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/sample2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/sample2.q.out b/ql/src/test/results/clientpositive/sample2.q.out
index f54c573..e09fd50 100644
--- a/ql/src/test/results/clientpositive/sample2.q.out
+++ b/ql/src/test/results/clientpositive/sample2.q.out
@@ -165,7 +165,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
   Stage: Stage-3

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/sample4.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/sample4.q.out b/ql/src/test/results/clientpositive/sample4.q.out
index 675fda9..46a1c6f 100644
--- a/ql/src/test/results/clientpositive/sample4.q.out
+++ b/ql/src/test/results/clientpositive/sample4.q.out
@@ -165,7 +165,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
   Stage: Stage-3

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/sample5.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/sample5.q.out b/ql/src/test/results/clientpositive/sample5.q.out
index 583784e..15e89d0 100644
--- a/ql/src/test/results/clientpositive/sample5.q.out
+++ b/ql/src/test/results/clientpositive/sample5.q.out
@@ -166,7 +166,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
   Stage: Stage-3

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/sample6.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/sample6.q.out b/ql/src/test/results/clientpositive/sample6.q.out
index 36e6906..c568bd2 100644
--- a/ql/src/test/results/clientpositive/sample6.q.out
+++ b/ql/src/test/results/clientpositive/sample6.q.out
@@ -165,7 +165,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
   Stage: Stage-3

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/sample7.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/sample7.q.out b/ql/src/test/results/clientpositive/sample7.q.out
index f0d9088..6ad8ced 100644
--- a/ql/src/test/results/clientpositive/sample7.q.out
+++ b/ql/src/test/results/clientpositive/sample7.q.out
@@ -166,7 +166,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
   Stage: Stage-3

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/skewjoin.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/skewjoin.q.out b/ql/src/test/results/clientpositive/skewjoin.q.out
index 9df48ed..d349f98 100644
--- a/ql/src/test/results/clientpositive/skewjoin.q.out
+++ b/ql/src/test/results/clientpositive/skewjoin.q.out
@@ -198,7 +198,8 @@ STAGE PLANS:
               name: default.dest_j1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM src src1 JOIN src src2 ON (src1.key = src2.key)
 INSERT OVERWRITE TABLE dest_j1 SELECT src1.key, src2.value

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/skewjoin_noskew.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/skewjoin_noskew.q.out b/ql/src/test/results/clientpositive/skewjoin_noskew.q.out
index 243a840..d39acaf 100644
--- a/ql/src/test/results/clientpositive/skewjoin_noskew.q.out
+++ b/ql/src/test/results/clientpositive/skewjoin_noskew.q.out
@@ -146,7 +146,8 @@ STAGE PLANS:
           name: default.noskew
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: create table noskew as select a.* from src a join src b on a.key=b.key order by a.key limit 30
 PREHOOK: type: CREATETABLE_AS_SELECT

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/skewjoin_onesideskew.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/skewjoin_onesideskew.q.out b/ql/src/test/results/clientpositive/skewjoin_onesideskew.q.out
index 09aaaf6..a5c0df9 100644
--- a/ql/src/test/results/clientpositive/skewjoin_onesideskew.q.out
+++ b/ql/src/test/results/clientpositive/skewjoin_onesideskew.q.out
@@ -186,7 +186,8 @@ STAGE PLANS:
           name: default.result
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: CREATE TABLE result AS SELECT a.* FROM skewtable a JOIN nonskewtable b ON a.key=b.key
 PREHOOK: type: CREATETABLE_AS_SELECT

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/smb_mapjoin9.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/smb_mapjoin9.q.out b/ql/src/test/results/clientpositive/smb_mapjoin9.q.out
index 4ba3c28..d1d0314 100644
--- a/ql/src/test/results/clientpositive/smb_mapjoin9.q.out
+++ b/ql/src/test/results/clientpositive/smb_mapjoin9.q.out
@@ -280,7 +280,8 @@ STAGE PLANS:
           name: default.smb_mapjoin9_results
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-3
     Map Reduce

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/smb_mapjoin_1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/smb_mapjoin_1.q.out b/ql/src/test/results/clientpositive/smb_mapjoin_1.q.out
index b3f36ad..56b7f7e 100644
--- a/ql/src/test/results/clientpositive/smb_mapjoin_1.q.out
+++ b/ql/src/test/results/clientpositive/smb_mapjoin_1.q.out
@@ -46,6 +46,49 @@ POSTHOOK: query: load data local inpath '../../data/files/smbbucket_3.rc' overwr
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
 POSTHOOK: Output: default@smb_bucket_3
+PREHOOK: query: desc formatted smb_bucket_1
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@smb_bucket_1
+POSTHOOK: query: desc formatted smb_bucket_1
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@smb_bucket_1
+# col_name            	data_type           	comment             
+key                 	int                 	                    
+value               	string              	                    
+	 	 
+# Detailed Table Information	 	 
+Database:           	default             	 
+#### A masked pattern was here ####
+Retention:          	0                   	 
+#### A masked pattern was here ####
+Table Type:         	MANAGED_TABLE       	 
+Table Parameters:	 	 
+	SORTBUCKETCOLSPREFIX	TRUE                
+	numFiles            	1                   
+	numRows             	0                   
+	rawDataSize         	0                   
+	totalSize           	208                 
+#### A masked pattern was here ####
+	 	 
+# Storage Information	 	 
+SerDe Library:      	org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe	 
+InputFormat:        	org.apache.hadoop.hive.ql.io.RCFileInputFormat	 
+OutputFormat:       	org.apache.hadoop.hive.ql.io.RCFileOutputFormat	 
+Compressed:         	No                  	 
+Num Buckets:        	1                   	 
+Bucket Columns:     	[key]               	 
+Sort Columns:       	[Order(col:key, order:1)]	 
+Storage Desc Params:	 	 
+	serialization.format	1                   
+PREHOOK: query: select count(*) from smb_bucket_1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@smb_bucket_1
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from smb_bucket_1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@smb_bucket_1
+#### A masked pattern was here ####
+5
 PREHOOK: query: explain
 select /*+mapjoin(a)*/ * from smb_bucket_1 a join smb_bucket_2 b on a.key = b.key
 PREHOOK: type: QUERY

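The new describe output above reports numRows 0 while the count(*) that follows returns 5: LOAD DATA only moves files into place, so basic stats keep their defaults until they are recomputed. A sketch of refreshing them (this statement is not part of the q file):

analyze table smb_bucket_1 compute statistics;
-- Afterwards DESCRIBE FORMATTED would report numRows 5 for the loaded file.
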
http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/smb_mapjoin_11.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/smb_mapjoin_11.q.out b/ql/src/test/results/clientpositive/smb_mapjoin_11.q.out
index b53e670..b76e7a8 100644
--- a/ql/src/test/results/clientpositive/smb_mapjoin_11.q.out
+++ b/ql/src/test/results/clientpositive/smb_mapjoin_11.q.out
@@ -202,7 +202,8 @@ STAGE PLANS:
               name: default.test_table3
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
 PREHOOK: query: INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') SELECT /*+ MAPJOIN(b) */ a.key, b.value FROM test_table1 a JOIN test_table2 b ON a.key = b.key AND a.ds = '1' AND b.ds = '1'

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/smb_mapjoin_12.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/smb_mapjoin_12.q.out b/ql/src/test/results/clientpositive/smb_mapjoin_12.q.out
index 9af4683..6def3de 100644
--- a/ql/src/test/results/clientpositive/smb_mapjoin_12.q.out
+++ b/ql/src/test/results/clientpositive/smb_mapjoin_12.q.out
@@ -204,7 +204,8 @@ STAGE PLANS:
               name: default.test_table3
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
 PREHOOK: query: INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') SELECT /*+ MAPJOIN(b) */ a.key, b.value FROM test_table1 a JOIN test_table2 b ON a.key = b.key AND a.ds = '1' AND b.ds >= '1'
@@ -409,7 +410,8 @@ STAGE PLANS:
               name: default.test_table3
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
 PREHOOK: query: INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '2') 

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/smb_mapjoin_20.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/smb_mapjoin_20.q.out b/ql/src/test/results/clientpositive/smb_mapjoin_20.q.out
index 48e4d05..ac62af2 100644
--- a/ql/src/test/results/clientpositive/smb_mapjoin_20.q.out
+++ b/ql/src/test/results/clientpositive/smb_mapjoin_20.q.out
@@ -87,7 +87,8 @@ STAGE PLANS:
               name: default.test_table2
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '1')
 SELECT a.key, a.value, a.value FROM test_table1 a WHERE a.ds = '1'
@@ -202,7 +203,8 @@ STAGE PLANS:
               name: default.test_table3
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1')
 SELECT a.value, a.key, a.value FROM test_table1 a WHERE a.ds = '1'
@@ -1364,5 +1366,6 @@ STAGE PLANS:
               name: default.test_table2
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/smb_mapjoin_21.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/smb_mapjoin_21.q.out b/ql/src/test/results/clientpositive/smb_mapjoin_21.q.out
index c0fdfd3..8b568eb 100644
--- a/ql/src/test/results/clientpositive/smb_mapjoin_21.q.out
+++ b/ql/src/test/results/clientpositive/smb_mapjoin_21.q.out
@@ -76,7 +76,8 @@ STAGE PLANS:
               name: default.test_table2
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: drop table test_table2
 PREHOOK: type: DROPTABLE
@@ -153,7 +154,8 @@ STAGE PLANS:
               name: default.test_table2
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: drop table test_table2
 PREHOOK: type: DROPTABLE
@@ -229,7 +231,8 @@ STAGE PLANS:
               name: default.test_table2
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: drop table test_table2
 PREHOOK: type: DROPTABLE
@@ -306,7 +309,8 @@ STAGE PLANS:
               name: default.test_table2
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: drop table test_table2
 PREHOOK: type: DROPTABLE
@@ -383,7 +387,8 @@ STAGE PLANS:
               name: default.test_table2
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: drop table test_table2
 PREHOOK: type: DROPTABLE
@@ -459,5 +464,6 @@ STAGE PLANS:
               name: default.test_table2
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/smb_mapjoin_22.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/smb_mapjoin_22.q.out b/ql/src/test/results/clientpositive/smb_mapjoin_22.q.out
index 36e8792..256a3c2 100644
--- a/ql/src/test/results/clientpositive/smb_mapjoin_22.q.out
+++ b/ql/src/test/results/clientpositive/smb_mapjoin_22.q.out
@@ -72,7 +72,8 @@ STAGE PLANS:
               name: default.test_table2
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: INSERT OVERWRITE TABLE test_table2
 SELECT * FROM test_table1
@@ -212,7 +213,8 @@ STAGE PLANS:
               name: default.test_table2
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: INSERT OVERWRITE TABLE test_table2
 SELECT * FROM test_table1

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/smb_mapjoin_7.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/smb_mapjoin_7.q.out b/ql/src/test/results/clientpositive/smb_mapjoin_7.q.out
index 82f5804..a2a8660 100644
--- a/ql/src/test/results/clientpositive/smb_mapjoin_7.q.out
+++ b/ql/src/test/results/clientpositive/smb_mapjoin_7.q.out
@@ -669,7 +669,8 @@ STAGE PLANS:
               name: default.smb_join_results
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-4
     Map Reduce


http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/llap/dynpart_sort_opt_vectorization.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/dynpart_sort_opt_vectorization.q.out b/ql/src/test/results/clientpositive/llap/dynpart_sort_opt_vectorization.q.out
index 2e62fb0..ecc467a 100644
--- a/ql/src/test/results/clientpositive/llap/dynpart_sort_opt_vectorization.q.out
+++ b/ql/src/test/results/clientpositive/llap/dynpart_sort_opt_vectorization.q.out
@@ -223,7 +223,8 @@ STAGE PLANS:
               name: default.over1k_part_orc
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: explain insert overwrite table over1k_part_limit_orc partition(ds="foo", t) select si,i,b,f,t from over1k_orc where t is null or t=27 limit 10
 PREHOOK: type: QUERY
@@ -316,7 +317,8 @@ STAGE PLANS:
               name: default.over1k_part_limit_orc
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: explain insert overwrite table over1k_part_buck_orc partition(t) select si,i,b,f,t from over1k_orc where t is null or t=27
 PREHOOK: type: QUERY
@@ -389,7 +391,8 @@ STAGE PLANS:
               name: default.over1k_part_buck_orc
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: explain insert overwrite table over1k_part_buck_sort_orc partition(t) select si,i,b,f,t from over1k_orc where t is null or t=27
 PREHOOK: type: QUERY
@@ -462,7 +465,8 @@ STAGE PLANS:
               name: default.over1k_part_buck_sort_orc
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: insert overwrite table over1k_part_orc partition(ds="foo", t) select si,i,b,f,t from over1k_orc where t is null or t=27 order by si
 PREHOOK: type: QUERY
@@ -604,7 +608,8 @@ STAGE PLANS:
               name: default.over1k_part_orc
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: explain insert into table over1k_part_limit_orc partition(ds="foo", t) select si,i,b,f,t from over1k_orc where t is null or t=27 limit 10
 PREHOOK: type: QUERY
@@ -697,7 +702,8 @@ STAGE PLANS:
               name: default.over1k_part_limit_orc
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: explain insert into table over1k_part_buck_orc partition(t) select si,i,b,f,t from over1k_orc where t is null or t=27
 PREHOOK: type: QUERY
@@ -770,7 +776,8 @@ STAGE PLANS:
               name: default.over1k_part_buck_orc
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: explain insert into table over1k_part_buck_sort_orc partition(t) select si,i,b,f,t from over1k_orc where t is null or t=27
 PREHOOK: type: QUERY
@@ -843,7 +850,8 @@ STAGE PLANS:
               name: default.over1k_part_buck_sort_orc
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: insert into table over1k_part_orc partition(ds="foo", t) select si,i,b,f,t from over1k_orc where t is null or t=27 order by si
 PREHOOK: type: QUERY
@@ -1369,7 +1377,8 @@ STAGE PLANS:
               name: default.over1k_part2_orc
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: explain insert overwrite table over1k_part2_orc partition(ds="foo",t) select si,i,b,f,t from over1k_orc where t is null or t=27 order by i
 PREHOOK: type: QUERY
@@ -1443,7 +1452,8 @@ STAGE PLANS:
               name: default.over1k_part2_orc
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: explain insert overwrite table over1k_part2_orc partition(ds="foo",t) select si,i,b,f,t from (select * from over1k_orc order by i limit 10) tmp where t is null or t=27
 PREHOOK: type: QUERY
@@ -1538,7 +1548,8 @@ STAGE PLANS:
               name: default.over1k_part2_orc
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: explain insert overwrite table over1k_part2_orc partition(ds="foo",t) select si,i,b,f,t from over1k_orc where t is null or t=27 group by si,i,b,f,t
 PREHOOK: type: QUERY
@@ -1616,7 +1627,8 @@ STAGE PLANS:
               name: default.over1k_part2_orc
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: explain insert overwrite table over1k_part2_orc partition(ds="foo",t) select si,i,b,f,t from over1k_orc where t is null or t=27 group by si,i,b,f,t
 PREHOOK: type: QUERY
@@ -1695,7 +1707,8 @@ STAGE PLANS:
               name: default.over1k_part2_orc
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: insert overwrite table over1k_part2_orc partition(ds="foo",t) select si,i,b,f,t from over1k_orc where t is null or t=27 order by i
 PREHOOK: type: QUERY
@@ -2071,7 +2084,8 @@ STAGE PLANS:
               name: default.over1k_part_buck_sort2_orc
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: explain insert overwrite table over1k_part_buck_sort2_orc partition(t) select si,i,b,f,t from over1k_orc where t is null or t=27
 PREHOOK: type: QUERY
@@ -2144,7 +2158,8 @@ STAGE PLANS:
               name: default.over1k_part_buck_sort2_orc
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: insert overwrite table over1k_part_buck_sort2_orc partition(t) select si,i,b,f,t from over1k_orc where t is null or t=27
 PREHOOK: type: QUERY

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/llap/dynpart_sort_optimization.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/dynpart_sort_optimization.q.out b/ql/src/test/results/clientpositive/llap/dynpart_sort_optimization.q.out
index 5b80a32..764b58e 100644
--- a/ql/src/test/results/clientpositive/llap/dynpart_sort_optimization.q.out
+++ b/ql/src/test/results/clientpositive/llap/dynpart_sort_optimization.q.out
@@ -180,7 +180,8 @@ STAGE PLANS:
               name: default.over1k_part
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: explain insert overwrite table over1k_part_limit partition(ds="foo", t) select si,i,b,f,t from over1k where t is null or t=27 limit 10
 PREHOOK: type: QUERY
@@ -273,7 +274,8 @@ STAGE PLANS:
               name: default.over1k_part_limit
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: explain insert overwrite table over1k_part_buck partition(t) select si,i,b,f,t from over1k where t is null or t=27
 PREHOOK: type: QUERY
@@ -346,7 +348,8 @@ STAGE PLANS:
               name: default.over1k_part_buck
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: explain insert overwrite table over1k_part_buck_sort partition(t) select si,i,b,f,t from over1k where t is null or t=27
 PREHOOK: type: QUERY
@@ -419,7 +422,8 @@ STAGE PLANS:
               name: default.over1k_part_buck_sort
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: insert overwrite table over1k_part partition(ds="foo", t) select si,i,b,f,t from over1k where t is null or t=27
 PREHOOK: type: QUERY
@@ -561,7 +565,8 @@ STAGE PLANS:
               name: default.over1k_part
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: explain insert into table over1k_part_limit partition(ds="foo", t) select si,i,b,f,t from over1k where t is null or t=27 limit 10
 PREHOOK: type: QUERY
@@ -654,7 +659,8 @@ STAGE PLANS:
               name: default.over1k_part_limit
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: explain insert into table over1k_part_buck partition(t) select si,i,b,f,t from over1k where t is null or t=27
 PREHOOK: type: QUERY
@@ -727,7 +733,8 @@ STAGE PLANS:
               name: default.over1k_part_buck
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: explain insert into table over1k_part_buck_sort partition(t) select si,i,b,f,t from over1k where t is null or t=27
 PREHOOK: type: QUERY
@@ -800,7 +807,8 @@ STAGE PLANS:
               name: default.over1k_part_buck_sort
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: insert into table over1k_part partition(ds="foo", t) select si,i,b,f,t from over1k where t is null or t=27
 PREHOOK: type: QUERY
@@ -1326,7 +1334,8 @@ STAGE PLANS:
               name: default.over1k_part2
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: explain insert overwrite table over1k_part2 partition(ds="foo",t) select si,i,b,f,t from over1k where t is null or t=27 order by i
 PREHOOK: type: QUERY
@@ -1400,7 +1409,8 @@ STAGE PLANS:
               name: default.over1k_part2
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: explain insert overwrite table over1k_part2 partition(ds="foo",t) select si,i,b,f,t from (select * from over1k order by i limit 10) tmp where t is null or t=27
 PREHOOK: type: QUERY
@@ -1495,7 +1505,8 @@ STAGE PLANS:
               name: default.over1k_part2
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: explain insert overwrite table over1k_part2 partition(ds="foo",t) select si,i,b,f,t from over1k where t is null or t=27 group by si,i,b,f,t
 PREHOOK: type: QUERY
@@ -1573,7 +1584,8 @@ STAGE PLANS:
               name: default.over1k_part2
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: explain insert overwrite table over1k_part2 partition(ds="foo",t) select si,i,b,f,t from over1k where t is null or t=27 group by si,i,b,f,t
 PREHOOK: type: QUERY
@@ -1652,7 +1664,8 @@ STAGE PLANS:
               name: default.over1k_part2
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: insert overwrite table over1k_part2 partition(ds="foo",t) select si,i,b,f,t from over1k where t is null or t=27 order by i
 PREHOOK: type: QUERY
@@ -2028,7 +2041,8 @@ STAGE PLANS:
               name: default.over1k_part_buck_sort2
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: explain insert overwrite table over1k_part_buck_sort2 partition(t) select si,i,b,f,t from over1k where t is null or t=27
 PREHOOK: type: QUERY
@@ -2101,7 +2115,8 @@ STAGE PLANS:
               name: default.over1k_part_buck_sort2
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: insert overwrite table over1k_part_buck_sort2 partition(t) select si,i,b,f,t from over1k where t is null or t=27
 PREHOOK: type: QUERY
@@ -2470,7 +2485,8 @@ STAGE PLANS:
               name: default.over1k_part3
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: explain insert overwrite table over1k_part3 partition(s,t,i) select si,b,f,s,t,i from over1k where t=27
 PREHOOK: type: QUERY
@@ -2545,7 +2561,8 @@ STAGE PLANS:
               name: default.over1k_part3
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: explain insert overwrite table over1k_part3 partition(s,t,i) select si,b,f,s,t,i from over1k where i=100
 PREHOOK: type: QUERY
@@ -2620,7 +2637,8 @@ STAGE PLANS:
               name: default.over1k_part3
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: explain insert overwrite table over1k_part3 partition(s,t,i) select si,b,f,s,t,i from over1k where i=100 and t=27
 PREHOOK: type: QUERY
@@ -2695,7 +2713,8 @@ STAGE PLANS:
               name: default.over1k_part3
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: explain insert overwrite table over1k_part3 partition(s,t,i) select si,b,f,s,t,i from over1k where i=100 and s="foo"
 PREHOOK: type: QUERY
@@ -2770,7 +2789,8 @@ STAGE PLANS:
               name: default.over1k_part3
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: explain insert overwrite table over1k_part3 partition(s,t,i) select si,b,f,s,t,i from over1k where t=27 and s="foo"
 PREHOOK: type: QUERY
@@ -2845,7 +2865,8 @@ STAGE PLANS:
               name: default.over1k_part3
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: explain insert overwrite table over1k_part3 partition(s,t,i) select si,b,f,s,t,i from over1k where i=100 and t=27 and s="foo"
 PREHOOK: type: QUERY
@@ -2903,7 +2924,8 @@ STAGE PLANS:
               name: default.over1k_part3
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: insert overwrite table over1k_part3 partition(s,t,i) select si,b,f,s,t,i from over1k where s="foo"
 PREHOOK: type: QUERY

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/llap/dynpart_sort_optimization2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/dynpart_sort_optimization2.q.out b/ql/src/test/results/clientpositive/llap/dynpart_sort_optimization2.q.out
index 568ab65..8b0fb2c 100644
--- a/ql/src/test/results/clientpositive/llap/dynpart_sort_optimization2.q.out
+++ b/ql/src/test/results/clientpositive/llap/dynpart_sort_optimization2.q.out
@@ -144,7 +144,8 @@ STAGE PLANS:
               name: default.ss_part
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: insert overwrite table ss_part partition (ss_sold_date_sk)
 select ss_net_paid_inc_tax,
@@ -378,7 +379,8 @@ STAGE PLANS:
               name: default.ss_part
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: insert overwrite table ss_part partition (ss_sold_date_sk)
 select ss_net_paid_inc_tax,
@@ -616,7 +618,8 @@ STAGE PLANS:
               name: default.ss_part
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: insert overwrite table ss_part partition (ss_sold_date_sk)
 select ss_net_paid_inc_tax,
@@ -848,7 +851,8 @@ STAGE PLANS:
               name: default.ss_part
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: insert overwrite table ss_part partition (ss_sold_date_sk)
 select ss_net_paid_inc_tax,
@@ -1141,7 +1145,8 @@ STAGE PLANS:
               name: default.ss_part_orc
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: insert overwrite table ss_part_orc partition (ss_sold_date_sk)
 select ss_net_paid_inc_tax,
@@ -1373,7 +1378,8 @@ STAGE PLANS:
               name: default.ss_part_orc
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: insert overwrite table ss_part_orc partition (ss_sold_date_sk)
 select ss_net_paid_inc_tax,
@@ -1647,7 +1653,8 @@ STAGE PLANS:
               name: default.hive13_dp1
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: insert overwrite table `hive13_dp1` partition(`day`)
 select
@@ -1777,7 +1784,8 @@ STAGE PLANS:
               name: default.hive13_dp1
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: insert overwrite table `hive13_dp1` partition(`day`)
 select 

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/llap/dynpart_sort_optimization_acid.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/dynpart_sort_optimization_acid.q.out b/ql/src/test/results/clientpositive/llap/dynpart_sort_optimization_acid.q.out
index 16c0b29..02cadb7 100644
--- a/ql/src/test/results/clientpositive/llap/dynpart_sort_optimization_acid.q.out
+++ b/ql/src/test/results/clientpositive/llap/dynpart_sort_optimization_acid.q.out
@@ -141,7 +141,8 @@ STAGE PLANS:
           Write Type: UPDATE
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: update acid_part set value = 'bar' where key = 'foo' and ds='2008-04-08'
 PREHOOK: type: QUERY
@@ -236,7 +237,8 @@ STAGE PLANS:
           Write Type: UPDATE
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: update acid_part set value = 'bar' where key = 'foo' and ds in ('2008-04-08')
 PREHOOK: type: QUERY
@@ -423,7 +425,8 @@ STAGE PLANS:
           Write Type: UPDATE
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: update acid_part_sdpo set value = 'bar' where key = 'foo' and ds='2008-04-08'
 PREHOOK: type: QUERY
@@ -518,7 +521,8 @@ STAGE PLANS:
           Write Type: UPDATE
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: update acid_part_sdpo set value = 'bar' where key = 'foo' and ds in ('2008-04-08')
 PREHOOK: type: QUERY
@@ -715,7 +719,8 @@ STAGE PLANS:
           Write Type: UPDATE
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: update acid_2L_part set value = 'bar' where key = 'foo' and ds='2008-04-08' and hr=11
 PREHOOK: type: QUERY
@@ -811,7 +816,8 @@ STAGE PLANS:
           Write Type: UPDATE
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: update acid_2L_part set value = 'bar' where key = 'foo' and ds='2008-04-08' and hr>=11
 PREHOOK: type: QUERY
@@ -936,7 +942,8 @@ STAGE PLANS:
           Write Type: DELETE
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: delete from acid_2L_part where value = 'bar'
 PREHOOK: type: QUERY
@@ -1130,7 +1137,8 @@ STAGE PLANS:
           Write Type: UPDATE
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: update acid_2L_part_sdpo set value = 'bar' where key = 'foo' and ds='2008-04-08' and hr=11
 PREHOOK: type: QUERY
@@ -1226,7 +1234,8 @@ STAGE PLANS:
           Write Type: UPDATE
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: update acid_2L_part_sdpo set value = 'bar' where key = 'foo' and ds='2008-04-08' and hr>=11
 PREHOOK: type: QUERY
@@ -1351,7 +1360,8 @@ STAGE PLANS:
           Write Type: DELETE
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: delete from acid_2L_part_sdpo where value = 'bar'
 PREHOOK: type: QUERY
@@ -1547,7 +1557,8 @@ STAGE PLANS:
           Write Type: UPDATE
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: update acid_2L_part_sdpo_no_cp set value = 'bar' where key = 'foo' and ds='2008-04-08' and hr=11
 PREHOOK: type: QUERY
@@ -1644,7 +1655,8 @@ STAGE PLANS:
           Write Type: UPDATE
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: update acid_2L_part_sdpo_no_cp set value = 'bar' where key = 'foo' and ds='2008-04-08' and hr>=11
 PREHOOK: type: QUERY

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/llap/explainuser_1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/explainuser_1.q.out b/ql/src/test/results/clientpositive/llap/explainuser_1.q.out
index f03a65f..5adf401 100644
--- a/ql/src/test/results/clientpositive/llap/explainuser_1.q.out
+++ b/ql/src/test/results/clientpositive/llap/explainuser_1.q.out
@@ -45,7 +45,7 @@ POSTHOOK: type: QUERY
 Plan optimized by CBO.
 
 Stage-3
-  Stats-Aggr Operator
+  Stats Work{}
     Stage-0
       Move Operator
         table:{"name:":"default.src_orc_merge_test_part"}
@@ -80,7 +80,7 @@ Vertex dependency in root stage
 Reducer 2 <- Map 1 (CUSTOM_SIMPLE_EDGE)
 
 Stage-3
-  Stats-Aggr Operator
+  Stats Work{}
     Stage-0
       Move Operator
         table:{"name:":"default.src_orc_merge_test_part"}
@@ -3183,7 +3183,7 @@ Reducer 2 <- Map 1 (SIMPLE_EDGE)
 Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
 
 Stage-3
-  Stats-Aggr Operator
+  Stats Work{}
     Stage-4
       Create Table Operator:
         name:default.nzhang_CTAS1
@@ -3236,7 +3236,7 @@ Reducer 2 <- Map 1 (SIMPLE_EDGE)
 Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
 
 Stage-3
-  Stats-Aggr Operator
+  Stats Work{}
     Stage-4
       Create Table Operator:
         name:default.nzhang_ctas3
@@ -4838,7 +4838,7 @@ Reducer 4 <- Reducer 2 (SIMPLE_EDGE)
 Reducer 5 <- Reducer 4 (SIMPLE_EDGE)
 
 Stage-4
-  Stats-Aggr Operator
+  Stats Work{}
     Stage-0
       Move Operator
         table:{"name:":"default.part_4"}
@@ -4889,7 +4889,7 @@ Stage-4
                                 PartitionCols:_col2
                                  Please refer to the previous PTF Operator [PTF_3]
 Stage-5
-  Stats-Aggr Operator
+  Stats Work{}
     Stage-1
       Move Operator
         table:{"name:":"default.part_5"}
@@ -5254,7 +5254,7 @@ Vertex dependency in root stage
 Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 3 (SIMPLE_EDGE)
 
 Stage-3
-  Stats-Aggr Operator
+  Stats Work{}
     Stage-0
       Move Operator
         table:{"name:":"default.dest_j1"}

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/llap/explainuser_2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/explainuser_2.q.out b/ql/src/test/results/clientpositive/llap/explainuser_2.q.out
index e5ba529..6b678f3 100644
--- a/ql/src/test/results/clientpositive/llap/explainuser_2.q.out
+++ b/ql/src/test/results/clientpositive/llap/explainuser_2.q.out
@@ -2607,7 +2607,7 @@ Reducer 8 <- Map 7 (SIMPLE_EDGE), Union 11 (SIMPLE_EDGE)
 Reducer 9 <- Map 14 (SIMPLE_EDGE), Reducer 8 (SIMPLE_EDGE), Union 4 (CONTAINS)
 
 Stage-5
-  Stats-Aggr Operator
+  Stats Work{}
     Stage-0
       Move Operator
         table:{"name:":"default.a"}
@@ -2840,13 +2840,13 @@ Stage-5
                   table:{"name:":"default.c"}
                    Please refer to the previous Select Operator [SEL_44]
 Stage-6
-  Stats-Aggr Operator
+  Stats Work{}
     Stage-1
       Move Operator
         table:{"name:":"default.b"}
          Please refer to the previous Stage-4
 Stage-7
-  Stats-Aggr Operator
+  Stats Work{}
     Stage-2
       Move Operator
         table:{"name:":"default.c"}
@@ -2910,7 +2910,7 @@ Reducer 6 <- Union 5 (SIMPLE_EDGE), Union 7 (CONTAINS)
 Reducer 8 <- Union 7 (SIMPLE_EDGE)
 
 Stage-5
-  Stats-Aggr Operator
+  Stats Work{}
     Stage-0
       Move Operator
         table:{"name:":"default.a"}
@@ -3172,13 +3172,13 @@ Stage-5
                 table:{"name:":"default.c"}
                  Please refer to the previous Group By Operator [GBY_120]
 Stage-6
-  Stats-Aggr Operator
+  Stats Work{}
     Stage-1
       Move Operator
         table:{"name:":"default.b"}
          Please refer to the previous Stage-4
 Stage-7
-  Stats-Aggr Operator
+  Stats Work{}
     Stage-2
       Move Operator
         table:{"name:":"default.c"}
@@ -3223,7 +3223,7 @@ Reducer 4 <- Union 3 (SIMPLE_EDGE)
 Reducer 5 <- Reducer 4 (SIMPLE_EDGE)
 
 Stage-4
-  Stats-Aggr Operator
+  Stats Work{}
     Stage-0
       Move Operator
         table:{"name:":"default.dest1"}
@@ -3278,7 +3278,7 @@ Stage-4
                           Output:["_col0","_col1","_col2"],aggregations:["count(DISTINCT substr(_col1, 5))"],keys:_col0, _col1
                            Please refer to the previous Group By Operator [GBY_13]
 Stage-5
-  Stats-Aggr Operator
+  Stats Work{}
     Stage-1
       Move Operator
         table:{"name:":"default.dest2"}
@@ -3397,7 +3397,7 @@ Reducer 4 <- Union 3 (SIMPLE_EDGE)
 Reducer 5 <- Union 3 (SIMPLE_EDGE)
 
 Stage-4
-  Stats-Aggr Operator
+  Stats Work{}
     Stage-0
       Move Operator
         table:{"name:":"default.dest1"}
@@ -3470,7 +3470,7 @@ Stage-4
                     Output:["_col0","_col1","_col2"],aggregations:["count(DISTINCT KEY._col2:0._col0)"],keys:KEY._col0, KEY._col1
                   <- Please refer to the previous Union 3 [SIMPLE_EDGE]
 Stage-5
-  Stats-Aggr Operator
+  Stats Work{}
     Stage-1
       Move Operator
         table:{"name:":"default.dest2"}
@@ -3505,7 +3505,7 @@ Reducer 4 <- Union 3 (SIMPLE_EDGE)
 Reducer 5 <- Union 3 (SIMPLE_EDGE)
 
 Stage-4
-  Stats-Aggr Operator
+  Stats Work{}
     Stage-0
       Move Operator
         table:{"name:":"default.dest1"}
@@ -3564,7 +3564,7 @@ Stage-4
                     Output:["_col0","_col1","_col2"],aggregations:["count(DISTINCT KEY._col2:0._col0)"],keys:KEY._col0, KEY._col1
                   <- Please refer to the previous Union 3 [SIMPLE_EDGE]
 Stage-5
-  Stats-Aggr Operator
+  Stats Work{}
     Stage-1
       Move Operator
         table:{"name:":"default.dest2"}

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/llap/groupby1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/groupby1.q.out b/ql/src/test/results/clientpositive/llap/groupby1.q.out
index 5917013..d58a9fd 100644
--- a/ql/src/test/results/clientpositive/llap/groupby1.q.out
+++ b/ql/src/test/results/clientpositive/llap/groupby1.q.out
@@ -95,7 +95,8 @@ STAGE PLANS:
               name: default.dest_g1
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM src INSERT OVERWRITE TABLE dest_g1 SELECT src.key, sum(substr(src.value,5)) GROUP BY src.key
 PREHOOK: type: QUERY

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/llap/groupby2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/groupby2.q.out b/ql/src/test/results/clientpositive/llap/groupby2.q.out
index f94db4e..5ab6235 100644
--- a/ql/src/test/results/clientpositive/llap/groupby2.q.out
+++ b/ql/src/test/results/clientpositive/llap/groupby2.q.out
@@ -96,7 +96,8 @@ STAGE PLANS:
               name: default.dest_g2
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM src
 INSERT OVERWRITE TABLE dest_g2 SELECT substr(src.key,1,1), count(DISTINCT substr(src.value,5)), concat(substr(src.key,1,1),sum(substr(src.value,5))) GROUP BY substr(src.key,1,1)

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/llap/groupby3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/groupby3.q.out b/ql/src/test/results/clientpositive/llap/groupby3.q.out
index 3495de6..897946b 100644
--- a/ql/src/test/results/clientpositive/llap/groupby3.q.out
+++ b/ql/src/test/results/clientpositive/llap/groupby3.q.out
@@ -110,7 +110,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM src
 INSERT OVERWRITE TABLE dest1 SELECT 

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/llap/insert1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/insert1.q.out b/ql/src/test/results/clientpositive/llap/insert1.q.out
index 0e27f9d..bc62682 100644
--- a/ql/src/test/results/clientpositive/llap/insert1.q.out
+++ b/ql/src/test/results/clientpositive/llap/insert1.q.out
@@ -76,7 +76,8 @@ STAGE PLANS:
               name: default.insert1
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: explain insert into table INSERT1 select a.key, a.value from insert2 a WHERE (a.key=-1)
 PREHOOK: type: QUERY
@@ -130,7 +131,8 @@ STAGE PLANS:
               name: default.insert1
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: create database x
 PREHOOK: type: CREATEDATABASE
@@ -198,7 +200,8 @@ STAGE PLANS:
               name: x.insert1
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: explain insert into table default.INSERT1 select a.key, a.value from insert2 a WHERE (a.key=-1)
 PREHOOK: type: QUERY
@@ -252,7 +255,8 @@ STAGE PLANS:
               name: default.insert1
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: explain
 from insert2
@@ -329,7 +333,8 @@ STAGE PLANS:
               name: default.insert1
 
   Stage: Stage-4
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-1
     Move Operator
@@ -342,7 +347,8 @@ STAGE PLANS:
               name: x.insert1
 
   Stage: Stage-5
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: CREATE DATABASE db2
 PREHOOK: type: CREATEDATABASE

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/llap/insert_into1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/insert_into1.q.out b/ql/src/test/results/clientpositive/llap/insert_into1.q.out
index df72ca1..61297f0 100644
--- a/ql/src/test/results/clientpositive/llap/insert_into1.q.out
+++ b/ql/src/test/results/clientpositive/llap/insert_into1.q.out
@@ -82,7 +82,8 @@ STAGE PLANS:
               name: default.insert_into1
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: INSERT INTO TABLE insert_into1 SELECT * from src ORDER BY key LIMIT 100
 PREHOOK: type: QUERY
@@ -204,7 +205,8 @@ STAGE PLANS:
               name: default.insert_into1
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: INSERT INTO TABLE insert_into1 SELECT * FROM src ORDER BY key LIMIT 100
 PREHOOK: type: QUERY
@@ -326,7 +328,8 @@ STAGE PLANS:
               name: default.insert_into1
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: INSERT OVERWRITE TABLE insert_into1 SELECT * FROM src ORDER BY key LIMIT 10
 PREHOOK: type: QUERY
@@ -426,7 +429,8 @@ STAGE PLANS:
               name: default.insert_into1
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: insert overwrite table insert_into1 select 1, 'a'
 PREHOOK: type: QUERY
@@ -488,7 +492,8 @@ STAGE PLANS:
               name: default.insert_into1
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: insert into insert_into1 select 2, 'b'
 PREHOOK: type: QUERY

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/llap/insert_into2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/insert_into2.q.out b/ql/src/test/results/clientpositive/llap/insert_into2.q.out
index a42c651..ef2b875 100644
--- a/ql/src/test/results/clientpositive/llap/insert_into2.q.out
+++ b/ql/src/test/results/clientpositive/llap/insert_into2.q.out
@@ -88,7 +88,8 @@ STAGE PLANS:
               name: default.insert_into2
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: INSERT INTO TABLE insert_into2 PARTITION (ds='1') SELECT * FROM src order by key limit 100
 PREHOOK: type: QUERY
@@ -251,7 +252,8 @@ STAGE PLANS:
               name: default.insert_into2
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: INSERT OVERWRITE TABLE insert_into2 PARTITION (ds='2')
   SELECT * FROM src order by key LIMIT 100
@@ -383,7 +385,8 @@ STAGE PLANS:
               name: default.insert_into2
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: INSERT OVERWRITE TABLE insert_into2 PARTITION (ds='2')
   SELECT * FROM src order by key LIMIT 50

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/llap/join1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/join1.q.out b/ql/src/test/results/clientpositive/llap/join1.q.out
index 724481b..661f55c 100644
--- a/ql/src/test/results/clientpositive/llap/join1.q.out
+++ b/ql/src/test/results/clientpositive/llap/join1.q.out
@@ -105,7 +105,8 @@ STAGE PLANS:
               name: default.dest_j1
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM src src1 JOIN src src2 ON (src1.key = src2.key)
 INSERT OVERWRITE TABLE dest_j1 SELECT src1.key, src2.value

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/llap/join32_lessSize.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/join32_lessSize.q.out b/ql/src/test/results/clientpositive/llap/join32_lessSize.q.out
index 2efe092..140f87e 100644
--- a/ql/src/test/results/clientpositive/llap/join32_lessSize.q.out
+++ b/ql/src/test/results/clientpositive/llap/join32_lessSize.q.out
@@ -364,7 +364,8 @@ STAGE PLANS:
               name: default.dest_j1
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
 PREHOOK: query: INSERT OVERWRITE TABLE dest_j1
@@ -932,7 +933,8 @@ STAGE PLANS:
               name: default.dest_j1
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
 PREHOOK: query: INSERT OVERWRITE TABLE dest_j1
@@ -1403,7 +1405,8 @@ STAGE PLANS:
               name: default.dest_j2
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
 PREHOOK: query: INSERT OVERWRITE TABLE dest_j2
@@ -1881,7 +1884,8 @@ STAGE PLANS:
               name: default.dest_j2
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
 PREHOOK: query: INSERT OVERWRITE TABLE dest_j2
@@ -2150,7 +2154,8 @@ STAGE PLANS:
               name: default.dest_j2
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: INSERT OVERWRITE TABLE dest_j2
 SELECT res.key, x.value, res.value  
@@ -2406,7 +2411,8 @@ STAGE PLANS:
               name: default.dest_j2
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: INSERT OVERWRITE TABLE dest_j2
 SELECT res.key, y.value, res.value

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/llap/list_bucket_dml_10.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/list_bucket_dml_10.q.out b/ql/src/test/results/clientpositive/llap/list_bucket_dml_10.q.out
index 23c28e0..edbf76d 100644
--- a/ql/src/test/results/clientpositive/llap/list_bucket_dml_10.q.out
+++ b/ql/src/test/results/clientpositive/llap/list_bucket_dml_10.q.out
@@ -159,7 +159,8 @@ STAGE PLANS:
               name: default.list_bucketing_static_part
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
 PREHOOK: query: insert overwrite table list_bucketing_static_part partition (ds = '2008-04-08', hr = '11')

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/llap/llap_stats.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/llap_stats.q.out b/ql/src/test/results/clientpositive/llap/llap_stats.q.out
index 4a48627..299d9b4 100644
--- a/ql/src/test/results/clientpositive/llap/llap_stats.q.out
+++ b/ql/src/test/results/clientpositive/llap/llap_stats.q.out
@@ -94,7 +94,6 @@ POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-0 is a root stage
   Stage-2 depends on stages: Stage-0
-  Stage-3 depends on stages: Stage-2
 
 STAGE PLANS:
   Stage: Stage-0
@@ -149,10 +148,8 @@ STAGE PLANS:
                         serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
 
   Stage: Stage-2
-    Stats-Aggr Operator
-
-  Stage: Stage-3
-    Column Stats Work
+    Stats Work
+      Basic Stats NoJob Work:
       Column Stats Desc:
           Columns: ctinyint, csmallint
           Column Types: tinyint, smallint

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/llap/load_dyn_part1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/load_dyn_part1.q.out b/ql/src/test/results/clientpositive/llap/load_dyn_part1.q.out
index 41f93cc..2b89d24 100644
--- a/ql/src/test/results/clientpositive/llap/load_dyn_part1.q.out
+++ b/ql/src/test/results/clientpositive/llap/load_dyn_part1.q.out
@@ -119,7 +119,8 @@ STAGE PLANS:
               name: default.nzhang_part1
 
   Stage: Stage-4
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-1
     Move Operator
@@ -135,7 +136,8 @@ STAGE PLANS:
               name: default.nzhang_part2
 
   Stage: Stage-5
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: from srcpart
 insert overwrite table nzhang_part1 partition (ds, hr) select key, value, ds, hr where ds <= '2008-04-08'

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/llap/load_dyn_part2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/load_dyn_part2.q.out b/ql/src/test/results/clientpositive/llap/load_dyn_part2.q.out
index f15226b..52b7353 100644
--- a/ql/src/test/results/clientpositive/llap/load_dyn_part2.q.out
+++ b/ql/src/test/results/clientpositive/llap/load_dyn_part2.q.out
@@ -96,7 +96,8 @@ STAGE PLANS:
               name: default.nzhang_part_bucket
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: insert overwrite table nzhang_part_bucket partition (ds='2010-03-23', hr) select key, value, hr from srcpart where ds is not null and hr is not null
 PREHOOK: type: QUERY

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/llap/load_dyn_part3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/load_dyn_part3.q.out b/ql/src/test/results/clientpositive/llap/load_dyn_part3.q.out
index fd24a05..5230b59 100644
--- a/ql/src/test/results/clientpositive/llap/load_dyn_part3.q.out
+++ b/ql/src/test/results/clientpositive/llap/load_dyn_part3.q.out
@@ -87,7 +87,8 @@ STAGE PLANS:
               name: default.nzhang_part3
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: insert overwrite table nzhang_part3 partition (ds, hr) select key, value, ds, hr from srcpart where ds is not null and hr is not null
 PREHOOK: type: QUERY

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/llap/load_dyn_part5.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/load_dyn_part5.q.out b/ql/src/test/results/clientpositive/llap/load_dyn_part5.q.out
index 8e2bf0c..8b98b8e 100644
--- a/ql/src/test/results/clientpositive/llap/load_dyn_part5.q.out
+++ b/ql/src/test/results/clientpositive/llap/load_dyn_part5.q.out
@@ -73,7 +73,8 @@ STAGE PLANS:
               name: default.nzhang_part5
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: insert overwrite table nzhang_part5 partition (value) select key, value from src
 PREHOOK: type: QUERY

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/llap/mapreduce1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/mapreduce1.q.out b/ql/src/test/results/clientpositive/llap/mapreduce1.q.out
index 37f92d9..0e94e71 100644
--- a/ql/src/test/results/clientpositive/llap/mapreduce1.q.out
+++ b/ql/src/test/results/clientpositive/llap/mapreduce1.q.out
@@ -88,7 +88,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM src
 INSERT OVERWRITE TABLE dest1

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/llap/mapreduce2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/mapreduce2.q.out b/ql/src/test/results/clientpositive/llap/mapreduce2.q.out
index 71bbb7e..6485f58 100644
--- a/ql/src/test/results/clientpositive/llap/mapreduce2.q.out
+++ b/ql/src/test/results/clientpositive/llap/mapreduce2.q.out
@@ -85,7 +85,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM src
 INSERT OVERWRITE TABLE dest1

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/llap/merge1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/merge1.q.out b/ql/src/test/results/clientpositive/llap/merge1.q.out
index ec794dc..e77a5df 100644
--- a/ql/src/test/results/clientpositive/llap/merge1.q.out
+++ b/ql/src/test/results/clientpositive/llap/merge1.q.out
@@ -87,7 +87,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: insert overwrite table dest1
 select key, count(1) from src group by key
@@ -513,7 +514,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: insert overwrite table dest1 select key from test_src
 PREHOOK: type: QUERY
@@ -579,7 +581,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: insert overwrite table dest1 select key from test_src
 PREHOOK: type: QUERY

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/llap/merge2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/merge2.q.out b/ql/src/test/results/clientpositive/llap/merge2.q.out
index 6ad6864..25b1946 100644
--- a/ql/src/test/results/clientpositive/llap/merge2.q.out
+++ b/ql/src/test/results/clientpositive/llap/merge2.q.out
@@ -87,7 +87,8 @@ STAGE PLANS:
               name: default.test1
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: insert overwrite table test1
 select key, count(1) from src group by key
@@ -513,7 +514,8 @@ STAGE PLANS:
               name: default.test1
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: insert overwrite table test1 select key from test_src
 PREHOOK: type: QUERY
@@ -579,7 +581,8 @@ STAGE PLANS:
               name: default.test1
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: insert overwrite table test1 select key from test_src
 PREHOOK: type: QUERY

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/llap/mm_all.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/mm_all.q.out b/ql/src/test/results/clientpositive/llap/mm_all.q.out
index cd019b1..cfbe659 100644
--- a/ql/src/test/results/clientpositive/llap/mm_all.q.out
+++ b/ql/src/test/results/clientpositive/llap/mm_all.q.out
@@ -103,7 +103,8 @@ STAGE PLANS:
           micromanaged table: true
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: insert into table part_mm partition(key_mm=455) select key from intermediate
 PREHOOK: type: QUERY
@@ -1574,10 +1575,10 @@ POSTHOOK: Output: default@multi1_mm@p=1
 POSTHOOK: Output: default@multi1_mm@p=455
 POSTHOOK: Output: default@multi1_mm@p=456
 POSTHOOK: Output: default@multi1_mm@p=457
-POSTHOOK: Lineage: multi1_mm PARTITION(p=1).key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: multi1_mm PARTITION(p=1).key2 SIMPLE [(intermediate)intermediate.FieldSchema(name:p, type:int, comment:null), ]
 POSTHOOK: Lineage: multi1_mm PARTITION(p=1).key SIMPLE [(intermediate)intermediate.FieldSchema(name:p, type:int, comment:null), ]
 POSTHOOK: Lineage: multi1_mm PARTITION(p=1).key2 SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: multi1_mm PARTITION(p=1).key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: multi1_mm PARTITION(p=1).key2 SIMPLE [(intermediate)intermediate.FieldSchema(name:p, type:int, comment:null), ]
 POSTHOOK: Lineage: multi1_mm PARTITION(p=455).key SIMPLE [(intermediate)intermediate.FieldSchema(name:p, type:int, comment:null), ]
 POSTHOOK: Lineage: multi1_mm PARTITION(p=455).key2 SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 POSTHOOK: Lineage: multi1_mm PARTITION(p=456).key SIMPLE [(intermediate)intermediate.FieldSchema(name:p, type:int, comment:null), ]

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/llap/multi_insert.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/multi_insert.q.out b/ql/src/test/results/clientpositive/llap/multi_insert.q.out
index 56d26b3..7f30afe 100644
--- a/ql/src/test/results/clientpositive/llap/multi_insert.q.out
+++ b/ql/src/test/results/clientpositive/llap/multi_insert.q.out
@@ -89,7 +89,8 @@ STAGE PLANS:
               name: default.src_multi1
 
   Stage: Stage-4
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-1
     Move Operator
@@ -102,7 +103,8 @@ STAGE PLANS:
               name: default.src_multi2
 
   Stage: Stage-5
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: from src
 insert overwrite table src_multi1 select * where key < 10
@@ -232,7 +234,8 @@ STAGE PLANS:
               name: default.src_multi1
 
   Stage: Stage-4
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-1
     Move Operator
@@ -245,7 +248,8 @@ STAGE PLANS:
               name: default.src_multi2
 
   Stage: Stage-5
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: from src
 insert overwrite table src_multi1 select * where key < 10
@@ -375,7 +379,8 @@ STAGE PLANS:
               name: default.src_multi1
 
   Stage: Stage-4
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-1
     Move Operator
@@ -388,7 +393,8 @@ STAGE PLANS:
               name: default.src_multi2
 
   Stage: Stage-5
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: from src
 insert overwrite table src_multi1 select * where key < 10
@@ -518,7 +524,8 @@ STAGE PLANS:
               name: default.src_multi1
 
   Stage: Stage-4
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-1
     Move Operator
@@ -531,7 +538,8 @@ STAGE PLANS:
               name: default.src_multi2
 
   Stage: Stage-5
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: from src
 insert overwrite table src_multi1 select * where key < 10
@@ -679,7 +687,8 @@ STAGE PLANS:
               name: default.src_multi1
 
   Stage: Stage-4
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-1
     Move Operator
@@ -692,7 +701,8 @@ STAGE PLANS:
               name: default.src_multi2
 
   Stage: Stage-5
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: from src
 insert overwrite table src_multi1 select * where key < 10 group by key, value
@@ -833,7 +843,8 @@ STAGE PLANS:
               name: default.src_multi1
 
   Stage: Stage-4
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-1
     Move Operator
@@ -846,7 +857,8 @@ STAGE PLANS:
               name: default.src_multi2
 
   Stage: Stage-5
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: from src
 insert overwrite table src_multi1 select * where key < 10 group by key, value
@@ -987,7 +999,8 @@ STAGE PLANS:
               name: default.src_multi1
 
   Stage: Stage-4
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-1
     Move Operator
@@ -1000,7 +1013,8 @@ STAGE PLANS:
               name: default.src_multi2
 
   Stage: Stage-5
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: from src
 insert overwrite table src_multi1 select * where key < 10 group by key, value
@@ -1141,7 +1155,8 @@ STAGE PLANS:
               name: default.src_multi1
 
   Stage: Stage-4
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-1
     Move Operator
@@ -1154,7 +1169,8 @@ STAGE PLANS:
               name: default.src_multi2
 
   Stage: Stage-5
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: from src
 insert overwrite table src_multi1 select * where key < 10 group by key, value
@@ -1312,7 +1328,8 @@ STAGE PLANS:
               name: default.src_multi1
 
   Stage: Stage-4
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-1
     Move Operator
@@ -1325,7 +1342,8 @@ STAGE PLANS:
               name: default.src_multi2
 
   Stage: Stage-5
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: from (select * from src  union all select * from src) s
 insert overwrite table src_multi1 select * where key < 10
@@ -1509,7 +1527,8 @@ STAGE PLANS:
               name: default.src_multi1
 
   Stage: Stage-4
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-1
     Move Operator
@@ -1522,7 +1541,8 @@ STAGE PLANS:
               name: default.src_multi2
 
   Stage: Stage-5
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: from (select * from src  union all select * from src) s
 insert overwrite table src_multi1 select * where key < 10
@@ -1706,7 +1726,8 @@ STAGE PLANS:
               name: default.src_multi1
 
   Stage: Stage-4
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-1
     Move Operator
@@ -1719,7 +1740,8 @@ STAGE PLANS:
               name: default.src_multi2
 
   Stage: Stage-5
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: from (select * from src  union all select * from src) s
 insert overwrite table src_multi1 select * where key < 10
@@ -1903,7 +1925,8 @@ STAGE PLANS:
               name: default.src_multi1
 
   Stage: Stage-4
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-1
     Move Operator
@@ -1916,7 +1939,8 @@ STAGE PLANS:
               name: default.src_multi2
 
   Stage: Stage-5
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: from (select * from src  union all select * from src) s
 insert overwrite table src_multi1 select * where key < 10

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/llap/multi_insert_lateral_view.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/multi_insert_lateral_view.q.out b/ql/src/test/results/clientpositive/llap/multi_insert_lateral_view.q.out
index 6d20939..df92c00 100644
--- a/ql/src/test/results/clientpositive/llap/multi_insert_lateral_view.q.out
+++ b/ql/src/test/results/clientpositive/llap/multi_insert_lateral_view.q.out
@@ -165,7 +165,8 @@ STAGE PLANS:
               name: default.src_lv1
 
   Stage: Stage-4
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-1
     Move Operator
@@ -178,7 +179,8 @@ STAGE PLANS:
               name: default.src_lv2
 
   Stage: Stage-5
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: from src_10
 insert overwrite table src_lv1 select key, C lateral view explode(array(key+1, key+2)) A as C
@@ -431,7 +433,8 @@ STAGE PLANS:
               name: default.src_lv1
 
   Stage: Stage-4
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-1
     Move Operator
@@ -444,7 +447,8 @@ STAGE PLANS:
               name: default.src_lv2
 
   Stage: Stage-5
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: from src_10
 insert overwrite table src_lv1 select key, sum(C) lateral view explode(array(key+1, key+2)) A as C group by key
@@ -673,7 +677,8 @@ STAGE PLANS:
               name: default.src_lv1
 
   Stage: Stage-5
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-1
     Move Operator
@@ -686,7 +691,8 @@ STAGE PLANS:
               name: default.src_lv2
 
   Stage: Stage-6
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-2
     Move Operator
@@ -699,7 +705,8 @@ STAGE PLANS:
               name: default.src_lv3
 
   Stage: Stage-7
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: from src_10
 insert overwrite table src_lv1 select key, sum(C) lateral view explode(array(key+1, key+2)) A as C group by key
@@ -983,7 +990,8 @@ STAGE PLANS:
               name: default.src_lv1
 
   Stage: Stage-5
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-1
     Move Operator
@@ -996,7 +1004,8 @@ STAGE PLANS:
               name: default.src_lv2
 
   Stage: Stage-6
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-2
     Move Operator
@@ -1009,7 +1018,8 @@ STAGE PLANS:
               name: default.src_lv3
 
   Stage: Stage-7
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: from src_10
 insert overwrite table src_lv1 select C, sum(distinct key) lateral view explode(array(key+1, key+2)) A as C group by C
@@ -1354,7 +1364,8 @@ STAGE PLANS:
               name: default.src_lv1
 
   Stage: Stage-6
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-1
     Move Operator
@@ -1367,7 +1378,8 @@ STAGE PLANS:
               name: default.src_lv2
 
   Stage: Stage-7
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-2
     Move Operator
@@ -1380,7 +1392,8 @@ STAGE PLANS:
               name: default.src_lv3
 
   Stage: Stage-8
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-3
     Move Operator
@@ -1393,7 +1406,8 @@ STAGE PLANS:
               name: default.src_lv4
 
   Stage: Stage-9
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: from src_10
 insert overwrite table src_lv1 select key, sum(distinct C) lateral view explode(array(key+1, key+2)) A as C group by key

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/llap/orc_merge1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/orc_merge1.q.out b/ql/src/test/results/clientpositive/llap/orc_merge1.q.out
index ba29491..5669a5b 100644
--- a/ql/src/test/results/clientpositive/llap/orc_merge1.q.out
+++ b/ql/src/test/results/clientpositive/llap/orc_merge1.q.out
@@ -98,7 +98,8 @@ STAGE PLANS:
               name: default.orcfile_merge1
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: INSERT OVERWRITE TABLE orcfile_merge1 PARTITION (ds='1', part)
     SELECT key, value, PMOD(HASH(key), 2) as part
@@ -191,7 +192,8 @@ STAGE PLANS:
               name: default.orcfile_merge1b
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-4
     Tez
@@ -320,7 +322,8 @@ STAGE PLANS:
               name: default.orcfile_merge1c
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-4
     Tez

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/llap/orc_merge10.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/orc_merge10.q.out b/ql/src/test/results/clientpositive/llap/orc_merge10.q.out
index dd5d1cb..4671e47 100644
--- a/ql/src/test/results/clientpositive/llap/orc_merge10.q.out
+++ b/ql/src/test/results/clientpositive/llap/orc_merge10.q.out
@@ -98,7 +98,8 @@ STAGE PLANS:
               name: default.orcfile_merge1
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: INSERT OVERWRITE TABLE orcfile_merge1 PARTITION (ds='1', part)
     SELECT key, value, PMOD(HASH(key), 2) as part
@@ -191,7 +192,8 @@ STAGE PLANS:
               name: default.orcfile_merge1b
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-4
     Tez
@@ -320,7 +322,8 @@ STAGE PLANS:
               name: default.orcfile_merge1c
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-4
     Tez
@@ -459,7 +462,8 @@ STAGE PLANS:
               name: default.orcfile_merge1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: ALTER TABLE  orcfile_merge1 PARTITION (ds='1', part='0') CONCATENATE
 PREHOOK: type: ALTER_PARTITION_MERGE

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/llap/orc_merge2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/orc_merge2.q.out b/ql/src/test/results/clientpositive/llap/orc_merge2.q.out
index c38852a..53a6424 100644
--- a/ql/src/test/results/clientpositive/llap/orc_merge2.q.out
+++ b/ql/src/test/results/clientpositive/llap/orc_merge2.q.out
@@ -73,7 +73,8 @@ STAGE PLANS:
               name: default.orcfile_merge2a
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: INSERT OVERWRITE TABLE orcfile_merge2a PARTITION (one='1', two, three)
     SELECT key, value, PMOD(HASH(key), 10) as two, 

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/llap/orc_merge3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/orc_merge3.q.out b/ql/src/test/results/clientpositive/llap/orc_merge3.q.out
index c3ca701..dca37ec 100644
--- a/ql/src/test/results/clientpositive/llap/orc_merge3.q.out
+++ b/ql/src/test/results/clientpositive/llap/orc_merge3.q.out
@@ -99,7 +99,8 @@ STAGE PLANS:
               name: default.orcfile_merge3b
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: INSERT OVERWRITE TABLE orcfile_merge3b
     SELECT key, value FROM orcfile_merge3a

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/llap/orc_merge4.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/orc_merge4.q.out b/ql/src/test/results/clientpositive/llap/orc_merge4.q.out
index a16d2bf..a2aa416 100644
--- a/ql/src/test/results/clientpositive/llap/orc_merge4.q.out
+++ b/ql/src/test/results/clientpositive/llap/orc_merge4.q.out
@@ -117,7 +117,8 @@ STAGE PLANS:
               name: default.orcfile_merge3b
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: INSERT OVERWRITE TABLE orcfile_merge3b
     SELECT key, value FROM orcfile_merge3a

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/llap/orc_merge5.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/orc_merge5.q.out b/ql/src/test/results/clientpositive/llap/orc_merge5.q.out
index 8de6935..fafba53 100644
--- a/ql/src/test/results/clientpositive/llap/orc_merge5.q.out
+++ b/ql/src/test/results/clientpositive/llap/orc_merge5.q.out
@@ -75,7 +75,8 @@ STAGE PLANS:
               name: default.orc_merge5b
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: insert overwrite table orc_merge5b select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13
 PREHOOK: type: QUERY
@@ -178,7 +179,8 @@ STAGE PLANS:
               name: default.orc_merge5b
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-4
     Tez
@@ -299,7 +301,8 @@ STAGE PLANS:
               name: default.orc_merge5b
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: alter table orc_merge5b concatenate
 PREHOOK: type: ALTER_TABLE_MERGE

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/llap/orc_merge6.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/orc_merge6.q.out b/ql/src/test/results/clientpositive/llap/orc_merge6.q.out
index b71e149..e5672fd 100644
--- a/ql/src/test/results/clientpositive/llap/orc_merge6.q.out
+++ b/ql/src/test/results/clientpositive/llap/orc_merge6.q.out
@@ -78,7 +78,8 @@ STAGE PLANS:
               name: default.orc_merge5a
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: insert overwrite table orc_merge5a partition (year="2000",hour=24) select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13
 PREHOOK: type: QUERY
@@ -226,7 +227,8 @@ STAGE PLANS:
               name: default.orc_merge5a
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-4
     Tez
@@ -434,7 +436,8 @@ STAGE PLANS:
               name: default.orc_merge5a
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: alter table orc_merge5a partition(year="2000",hour=24) concatenate
 PREHOOK: type: ALTER_PARTITION_MERGE

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/llap/orc_merge7.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/orc_merge7.q.out b/ql/src/test/results/clientpositive/llap/orc_merge7.q.out
index 1e644df..f224822 100644
--- a/ql/src/test/results/clientpositive/llap/orc_merge7.q.out
+++ b/ql/src/test/results/clientpositive/llap/orc_merge7.q.out
@@ -73,7 +73,8 @@ STAGE PLANS:
               name: default.orc_merge5a
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: insert overwrite table orc_merge5a partition (st) select userid,string1,subtype,decimal1,ts,subtype from orc_merge5
 PREHOOK: type: QUERY
@@ -255,7 +256,8 @@ STAGE PLANS:
               name: default.orc_merge5a
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-4
     Tez
@@ -540,7 +542,8 @@ STAGE PLANS:
               name: default.orc_merge5a
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: alter table orc_merge5a partition(st=80.0) concatenate
 PREHOOK: type: ALTER_PARTITION_MERGE

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/llap/orc_merge_diff_fs.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/orc_merge_diff_fs.q.out b/ql/src/test/results/clientpositive/llap/orc_merge_diff_fs.q.out
index ba29491..5669a5b 100644
--- a/ql/src/test/results/clientpositive/llap/orc_merge_diff_fs.q.out
+++ b/ql/src/test/results/clientpositive/llap/orc_merge_diff_fs.q.out
@@ -98,7 +98,8 @@ STAGE PLANS:
               name: default.orcfile_merge1
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: INSERT OVERWRITE TABLE orcfile_merge1 PARTITION (ds='1', part)
     SELECT key, value, PMOD(HASH(key), 2) as part
@@ -191,7 +192,8 @@ STAGE PLANS:
               name: default.orcfile_merge1b
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-4
     Tez
@@ -320,7 +322,8 @@ STAGE PLANS:
               name: default.orcfile_merge1c
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-4
     Tez

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/llap/orc_merge_incompat1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/orc_merge_incompat1.q.out b/ql/src/test/results/clientpositive/llap/orc_merge_incompat1.q.out
index b6c1201..ae7250a 100644
--- a/ql/src/test/results/clientpositive/llap/orc_merge_incompat1.q.out
+++ b/ql/src/test/results/clientpositive/llap/orc_merge_incompat1.q.out
@@ -74,7 +74,8 @@ STAGE PLANS:
               name: default.orc_merge5b
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: insert overwrite table orc_merge5b select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13
 PREHOOK: type: QUERY

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/llap/orc_merge_incompat2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/orc_merge_incompat2.q.out b/ql/src/test/results/clientpositive/llap/orc_merge_incompat2.q.out
index a0472ce..c1822a1 100644
--- a/ql/src/test/results/clientpositive/llap/orc_merge_incompat2.q.out
+++ b/ql/src/test/results/clientpositive/llap/orc_merge_incompat2.q.out
@@ -73,7 +73,8 @@ STAGE PLANS:
               name: default.orc_merge5a
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: insert overwrite table orc_merge5a partition (st) select userid,string1,subtype,decimal1,ts,subtype from orc_merge5 order by userid
 PREHOOK: type: QUERY
@@ -286,7 +287,8 @@ STAGE PLANS:
               name: default.orc_merge5a
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: alter table orc_merge5a partition(st=80.0) concatenate
 PREHOOK: type: ALTER_PARTITION_MERGE

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/llap/parallel.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/parallel.q.out b/ql/src/test/results/clientpositive/llap/parallel.q.out
index aea9417..3beb340 100644
--- a/ql/src/test/results/clientpositive/llap/parallel.q.out
+++ b/ql/src/test/results/clientpositive/llap/parallel.q.out
@@ -121,7 +121,8 @@ STAGE PLANS:
               name: default.src_a
 
   Stage: Stage-4
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-1
     Move Operator
@@ -134,7 +135,8 @@ STAGE PLANS:
               name: default.src_b
 
   Stage: Stage-5
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: from (select key, value from src group by key, value) s
 insert overwrite table src_a select s.key, s.value group by s.key, s.value

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/llap/parallel_colstats.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/parallel_colstats.q.out b/ql/src/test/results/clientpositive/llap/parallel_colstats.q.out
index e89bf2f..4ac3fbb 100644
--- a/ql/src/test/results/clientpositive/llap/parallel_colstats.q.out
+++ b/ql/src/test/results/clientpositive/llap/parallel_colstats.q.out
@@ -29,8 +29,6 @@ STAGE DEPENDENCIES:
   Stage-3 depends on stages: Stage-2
   Stage-0 depends on stages: Stage-3
   Stage-4 depends on stages: Stage-0
-  Stage-6 depends on stages: Stage-4, Stage-5
-  Stage-7 depends on stages: Stage-4, Stage-5
   Stage-1 depends on stages: Stage-3
   Stage-5 depends on stages: Stage-1
 
@@ -181,22 +179,13 @@ STAGE PLANS:
               name: default.src_a
 
   Stage: Stage-4
-    Stats-Aggr Operator
-
-  Stage: Stage-6
-    Column Stats Work
+    Stats Work
+      Basic Stats Work:
       Column Stats Desc:
           Columns: key, value
           Column Types: string, string
           Table: default.src_a
 
-  Stage: Stage-7
-    Column Stats Work
-      Column Stats Desc:
-          Columns: key, value
-          Column Types: string, string
-          Table: default.src_b
-
   Stage: Stage-1
     Move Operator
       tables:
@@ -208,7 +197,12 @@ STAGE PLANS:
               name: default.src_b
 
   Stage: Stage-5
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
+      Column Stats Desc:
+          Columns: key, value
+          Column Types: string, string
+          Table: default.src_b
 
 PREHOOK: query: from (select key, value from src group by key, value) s
 insert overwrite table src_a select s.key, s.value group by s.key, s.value
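
parallel_colstats.q.out shows the merge itself most clearly: the two trailing "Column Stats Work" stages (old Stage-6 and Stage-7) drop out of the stage DAG, and each insert branch's "Column Stats Desc:" moves under the "Stats Work" stage that already handles its basic stats (Stage-4 for src_a, Stage-5 for src_b). In other words, one work object per target table now carries both halves, with the column half optional. A minimal sketch of that shape, assuming the patch introduces a unified stats work descriptor (the class and members below are illustrative placeholders, not the patch's actual API):

    // Hypothetical sketch: one serializable work object holding mandatory
    // basic stats plus optional column stats, so the planner emits a single
    // stats stage per target table.
    public class MergedStatsWork implements java.io.Serializable {
      private static final long serialVersionUID = 1L;

      /** Placeholder for the basic-stats half (numRows, numFiles, totalSize, ...). */
      public static final class BasicStatsWork implements java.io.Serializable {}

      /** Placeholder for the column-stats half (column names/types, target table). */
      public static final class ColumnStatsDesc implements java.io.Serializable {}

      private final BasicStatsWork basic;
      private ColumnStatsDesc colStats; // null unless column stats were requested

      public MergedStatsWork(BasicStatsWork basic) {
        this.basic = basic;
      }

      public BasicStatsWork getBasicStatsWork() {
        return basic;
      }

      public void setColStats(ColumnStatsDesc colStats) {
        this.colStats = colStats;
      }

      public boolean hasColStats() {
        return colStats != null;
      }
    }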

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/llap/ptf.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/ptf.q.out b/ql/src/test/results/clientpositive/llap/ptf.q.out
index c69ed1d..4f79e26 100644
--- a/ql/src/test/results/clientpositive/llap/ptf.q.out
+++ b/ql/src/test/results/clientpositive/llap/ptf.q.out
@@ -3181,7 +3181,8 @@ STAGE PLANS:
               name: default.part_4
 
   Stage: Stage-4
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-1
     Move Operator
@@ -3194,7 +3195,8 @@ STAGE PLANS:
               name: default.part_5
 
   Stage: Stage-5
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: from noop(on part 
 partition by p_mfgr 

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/llap/rcfile_createas1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/rcfile_createas1.q.out b/ql/src/test/results/clientpositive/llap/rcfile_createas1.q.out
index 42d3bd0..066295f 100644
--- a/ql/src/test/results/clientpositive/llap/rcfile_createas1.q.out
+++ b/ql/src/test/results/clientpositive/llap/rcfile_createas1.q.out
@@ -97,7 +97,8 @@ STAGE PLANS:
           name: default.rcfile_createas1b
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-0
     Move Operator

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/llap/rcfile_merge2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/rcfile_merge2.q.out b/ql/src/test/results/clientpositive/llap/rcfile_merge2.q.out
index 2bb8e8a..e2ecc76 100644
--- a/ql/src/test/results/clientpositive/llap/rcfile_merge2.q.out
+++ b/ql/src/test/results/clientpositive/llap/rcfile_merge2.q.out
@@ -73,7 +73,8 @@ STAGE PLANS:
               name: default.rcfile_merge2a
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: INSERT OVERWRITE TABLE rcfile_merge2a PARTITION (one='1', two, three)
     SELECT key, value, PMOD(HASH(key), 10) as two, 

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/llap/rcfile_merge3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/rcfile_merge3.q.out b/ql/src/test/results/clientpositive/llap/rcfile_merge3.q.out
index d027bd3..02ee0e0 100644
--- a/ql/src/test/results/clientpositive/llap/rcfile_merge3.q.out
+++ b/ql/src/test/results/clientpositive/llap/rcfile_merge3.q.out
@@ -99,7 +99,8 @@ STAGE PLANS:
               name: default.rcfile_merge3b
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: INSERT OVERWRITE TABLE rcfile_merge3b
     SELECT key, value FROM rcfile_merge3a

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/llap/rcfile_merge4.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/rcfile_merge4.q.out b/ql/src/test/results/clientpositive/llap/rcfile_merge4.q.out
index f4a0353..52c97d1 100644
--- a/ql/src/test/results/clientpositive/llap/rcfile_merge4.q.out
+++ b/ql/src/test/results/clientpositive/llap/rcfile_merge4.q.out
@@ -99,7 +99,8 @@ STAGE PLANS:
               name: default.rcfile_merge3b
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: INSERT OVERWRITE TABLE rcfile_merge3b
     SELECT key, value FROM rcfile_merge3a

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/llap/reduce_deduplicate.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/reduce_deduplicate.q.out b/ql/src/test/results/clientpositive/llap/reduce_deduplicate.q.out
index 40524f1..65b74ee 100644
--- a/ql/src/test/results/clientpositive/llap/reduce_deduplicate.q.out
+++ b/ql/src/test/results/clientpositive/llap/reduce_deduplicate.q.out
@@ -175,7 +175,8 @@ STAGE PLANS:
               name: default.bucket5_1
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
 PREHOOK: query: insert overwrite table bucket5_1
@@ -388,6 +389,7 @@ STAGE PLANS:
               name: default.complex_tbl_1
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/llap/sample1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/sample1.q.out b/ql/src/test/results/clientpositive/llap/sample1.q.out
index 1a7fb32..5b69bb5 100644
--- a/ql/src/test/results/clientpositive/llap/sample1.q.out
+++ b/ql/src/test/results/clientpositive/llap/sample1.q.out
@@ -161,7 +161,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
 PREHOOK: query: INSERT OVERWRITE TABLE dest1 SELECT s.*

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/llap/skewjoin.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/skewjoin.q.out b/ql/src/test/results/clientpositive/llap/skewjoin.q.out
index e53da81..b102bca 100644
--- a/ql/src/test/results/clientpositive/llap/skewjoin.q.out
+++ b/ql/src/test/results/clientpositive/llap/skewjoin.q.out
@@ -169,7 +169,8 @@ STAGE PLANS:
               name: default.dest_j1
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM src src1 JOIN src src2 ON (src1.key = src2.key)
 INSERT OVERWRITE TABLE dest_j1 SELECT src1.key, src2.value

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/llap/smb_mapjoin_18.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/smb_mapjoin_18.q.out b/ql/src/test/results/clientpositive/llap/smb_mapjoin_18.q.out
index c061df6..135f83e 100644
--- a/ql/src/test/results/clientpositive/llap/smb_mapjoin_18.q.out
+++ b/ql/src/test/results/clientpositive/llap/smb_mapjoin_18.q.out
@@ -85,7 +85,8 @@ STAGE PLANS:
               name: default.test_table2
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '1')
 SELECT a.key, a.value FROM test_table1 a WHERE a.ds = '1'
@@ -281,7 +282,8 @@ STAGE PLANS:
               name: default.test_table2
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '2')
 SELECT a.key, a.value FROM test_table1 a WHERE a.ds = '1' and a.key = 238
@@ -405,7 +407,8 @@ STAGE PLANS:
               name: default.test_table2
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '2')
 SELECT a.key, a.value FROM test_table2 a WHERE a.ds = '2'

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/llap/smb_mapjoin_19.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/smb_mapjoin_19.q.out b/ql/src/test/results/clientpositive/llap/smb_mapjoin_19.q.out
index 1f4fcae..64763fa 100644
--- a/ql/src/test/results/clientpositive/llap/smb_mapjoin_19.q.out
+++ b/ql/src/test/results/clientpositive/llap/smb_mapjoin_19.q.out
@@ -85,7 +85,8 @@ STAGE PLANS:
               name: default.test_table2
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '1')
 SELECT a.key, a.value FROM test_table1 a WHERE a.ds = '1'

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/llap/smb_mapjoin_6.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/smb_mapjoin_6.q.out b/ql/src/test/results/clientpositive/llap/smb_mapjoin_6.q.out
index 91b8f81..07e669f 100644
--- a/ql/src/test/results/clientpositive/llap/smb_mapjoin_6.q.out
+++ b/ql/src/test/results/clientpositive/llap/smb_mapjoin_6.q.out
@@ -150,7 +150,8 @@ STAGE PLANS:
               name: default.smb_join_results
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: insert overwrite table smb_join_results
 select /*+mapjoin(a)*/ * from smb_bucket4_1 a join smb_bucket4_2 b on a.key = b.key
@@ -1332,7 +1333,8 @@ STAGE PLANS:
               name: default.smb_join_results
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: insert overwrite table smb_join_results
 select /*+mapjoin(b)*/ * from smb_bucket4_1 a join smb_bucket4_2 b on a.key = b.key
@@ -2530,7 +2532,8 @@ STAGE PLANS:
               name: default.smb_join_results
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: insert overwrite table smb_join_results
 select /*+mapjoin(a)*/ * from smb_bucket4_1 a join smb_bucket4_2 b on a.key = b.key where a.key>1000
@@ -2644,7 +2647,8 @@ STAGE PLANS:
               name: default.smb_join_results
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: insert overwrite table smb_join_results
 select /*+mapjoin(b)*/ * from smb_bucket4_1 a join smb_bucket4_2 b on a.key = b.key where a.key>1000

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/llap/sqlmerge.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/sqlmerge.q.out b/ql/src/test/results/clientpositive/llap/sqlmerge.q.out
index 5fc35d5..2a6a0f3 100644
--- a/ql/src/test/results/clientpositive/llap/sqlmerge.q.out
+++ b/ql/src/test/results/clientpositive/llap/sqlmerge.q.out
@@ -229,7 +229,8 @@ STAGE PLANS:
           Write Type: DELETE
 
   Stage: Stage-6
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-2
     Move Operator
@@ -243,7 +244,8 @@ STAGE PLANS:
           Write Type: UPDATE
 
   Stage: Stage-7
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-3
     Move Operator
@@ -256,7 +258,8 @@ STAGE PLANS:
               name: default.merge_tmp_table
 
   Stage: Stage-8
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-1
     Move Operator
@@ -270,7 +273,8 @@ STAGE PLANS:
           Write Type: INSERT
 
   Stage: Stage-9
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: explain merge into acidTbl as t using nonAcidOrcTbl s ON t.a = s.a
 WHEN NOT MATCHED THEN INSERT VALUES(s.a, s.b)
@@ -373,5 +377,6 @@ STAGE PLANS:
           Write Type: INSERT
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/llap/stats11.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/stats11.q.out b/ql/src/test/results/clientpositive/llap/stats11.q.out
index 2899d8a..66c87c3 100644
--- a/ql/src/test/results/clientpositive/llap/stats11.q.out
+++ b/ql/src/test/results/clientpositive/llap/stats11.q.out
@@ -54,7 +54,8 @@ STAGE PLANS:
               name: default.srcbucket_mapjoin_part
 
   Stage: Stage-1
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: load data local inpath '../../data/files/srcbucket20.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08')
 PREHOOK: type: LOAD
@@ -544,7 +545,8 @@ STAGE PLANS:
               name: default.bucketmapjoin_tmp_result
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
 PREHOOK: query: insert overwrite table bucketmapjoin_tmp_result 
@@ -910,7 +912,8 @@ STAGE PLANS:
               name: default.bucketmapjoin_tmp_result
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
 PREHOOK: query: insert overwrite table bucketmapjoin_tmp_result 

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/llap/stats_noscan_1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/stats_noscan_1.q.out b/ql/src/test/results/clientpositive/llap/stats_noscan_1.q.out
index 6d3e674..306f0de 100644
--- a/ql/src/test/results/clientpositive/llap/stats_noscan_1.q.out
+++ b/ql/src/test/results/clientpositive/llap/stats_noscan_1.q.out
@@ -44,7 +44,8 @@ STAGE DEPENDENCIES:
 
 STAGE PLANS:
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: analyze table analyze_srcpart PARTITION(ds='2008-04-08',hr=11) compute statistics noscan
 PREHOOK: type: QUERY
@@ -305,7 +306,8 @@ STAGE DEPENDENCIES:
 
 STAGE PLANS:
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: analyze table analyze_srcpart_partial PARTITION(ds='2008-04-08') compute statistics noscan
 PREHOOK: type: QUERY
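
The stats_noscan_1 plans above cover ANALYZE ... COMPUTE STATISTICS NOSCAN, which now also prints the merged "Stats Work" stage. The footer-based implementation behind noscan previously lived in StatsNoJobTask (deleted further down in this patch): if a format's record reader advertises stats, basic stats are read from the file footer instead of scanning rows. A condensed sketch of that pattern, lifted from the deleted code (the reader/split setup is elided):

    import org.apache.hadoop.hive.ql.io.StatsProvidingRecordReader;
    import org.apache.hadoop.mapred.RecordReader;

    public class NoscanSketch {
      // Returns {numRows, rawDataSize} if the reader exposes footer stats,
      // {0, 0} otherwise -- mirroring StatsNoJobTask's per-file loop.
      static long[] footerStats(RecordReader<?, ?> reader) {
        long numRows = 0, rawDataSize = 0;
        if (reader instanceof StatsProvidingRecordReader) {
          StatsProvidingRecordReader statsRR = (StatsProvidingRecordReader) reader;
          numRows = statsRR.getStats().getRowCount();
          rawDataSize = statsRR.getStats().getRawDataSize();
        }
        return new long[] { numRows, rawDataSize };
      }
    }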

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/llap/temp_table.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/temp_table.q.out b/ql/src/test/results/clientpositive/llap/temp_table.q.out
index 462edae..4cf47b5 100644
--- a/ql/src/test/results/clientpositive/llap/temp_table.q.out
+++ b/ql/src/test/results/clientpositive/llap/temp_table.q.out
@@ -52,7 +52,8 @@ STAGE PLANS:
           isTemporary: true
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-0
     Move Operator
@@ -124,7 +125,8 @@ STAGE PLANS:
           isTemporary: true
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-0
     Move Operator

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/llap/tez_dml.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/tez_dml.q.out b/ql/src/test/results/clientpositive/llap/tez_dml.q.out
index 84c65a7..adccaae 100644
--- a/ql/src/test/results/clientpositive/llap/tez_dml.q.out
+++ b/ql/src/test/results/clientpositive/llap/tez_dml.q.out
@@ -84,7 +84,8 @@ STAGE PLANS:
           name: default.tmp_src
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-0
     Move Operator
@@ -480,7 +481,8 @@ STAGE PLANS:
               name: default.tmp_src_part
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: INSERT INTO TABLE tmp_src_part PARTITION (d) SELECT * FROM tmp_src
 PREHOOK: type: QUERY
@@ -917,7 +919,8 @@ STAGE PLANS:
               name: default.even
 
   Stage: Stage-4
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-1
     Move Operator
@@ -930,7 +933,8 @@ STAGE PLANS:
               name: default.odd
 
   Stage: Stage-5
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM src
 INSERT INTO TABLE even SELECT key, value WHERE key % 2 = 0 

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/llap/tez_join_result_complex.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/tez_join_result_complex.q.out b/ql/src/test/results/clientpositive/llap/tez_join_result_complex.q.out
index 417d318..eee2ab6 100644
--- a/ql/src/test/results/clientpositive/llap/tez_join_result_complex.q.out
+++ b/ql/src/test/results/clientpositive/llap/tez_join_result_complex.q.out
@@ -369,7 +369,8 @@ STAGE PLANS:
           name: default.ct_events1_test
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
   Stage: Stage-0
@@ -1353,7 +1354,8 @@ STAGE PLANS:
           name: default.ct_events1_test
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
   Stage: Stage-0


[21/22] hive git commit: HIVE-16827 : Merge stats task and column stats task into a single task (Zoltan Haindrich via Ashutosh Chauhan)

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/java/org/apache/hadoop/hive/ql/exec/ColumnStatsTask.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/ColumnStatsTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/ColumnStatsTask.java
deleted file mode 100644
index 1f28688..0000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/ColumnStatsTask.java
+++ /dev/null
@@ -1,451 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.ql.exec;
-
-import java.io.IOException;
-import java.io.Serializable;
-import java.nio.ByteBuffer;
-import java.util.ArrayList;
-import java.util.List;
-
-import org.apache.hadoop.hive.common.type.HiveDecimal;
-import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
-import org.apache.hadoop.hive.metastore.Warehouse;
-import org.apache.hadoop.hive.metastore.api.BinaryColumnStatsData;
-import org.apache.hadoop.hive.metastore.api.BooleanColumnStatsData;
-import org.apache.hadoop.hive.metastore.api.ColumnStatistics;
-import org.apache.hadoop.hive.metastore.api.ColumnStatisticsData;
-import org.apache.hadoop.hive.metastore.api.ColumnStatisticsDesc;
-import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;
-import org.apache.hadoop.hive.metastore.api.Date;
-import org.apache.hadoop.hive.metastore.api.Decimal;
-import org.apache.hadoop.hive.metastore.api.FieldSchema;
-import org.apache.hadoop.hive.metastore.api.MetaException;
-import org.apache.hadoop.hive.metastore.api.SetPartitionsStatsRequest;
-import org.apache.hadoop.hive.metastore.columnstats.cache.DateColumnStatsDataInspector;
-import org.apache.hadoop.hive.metastore.columnstats.cache.DecimalColumnStatsDataInspector;
-import org.apache.hadoop.hive.metastore.columnstats.cache.DoubleColumnStatsDataInspector;
-import org.apache.hadoop.hive.metastore.columnstats.cache.LongColumnStatsDataInspector;
-import org.apache.hadoop.hive.metastore.columnstats.cache.StringColumnStatsDataInspector;
-import org.apache.hadoop.hive.ql.CompilationOpContext;
-import org.apache.hadoop.hive.ql.DriverContext;
-import org.apache.hadoop.hive.ql.QueryPlan;
-import org.apache.hadoop.hive.ql.QueryState;
-import org.apache.hadoop.hive.ql.metadata.Hive;
-import org.apache.hadoop.hive.ql.metadata.HiveException;
-import org.apache.hadoop.hive.ql.metadata.Table;
-import org.apache.hadoop.hive.ql.parse.ExplainConfiguration.AnalyzeState;
-import org.apache.hadoop.hive.ql.plan.ColumnStatsWork;
-import org.apache.hadoop.hive.ql.plan.api.StageType;
-import org.apache.hadoop.hive.serde2.io.DateWritable;
-import org.apache.hadoop.hive.serde2.objectinspector.InspectableObject;
-import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
-import org.apache.hadoop.hive.serde2.objectinspector.PrimitiveObjectInspector;
-import org.apache.hadoop.hive.serde2.objectinspector.StructField;
-import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector;
-import org.apache.hadoop.hive.serde2.objectinspector.primitive.BinaryObjectInspector;
-import org.apache.hadoop.hive.serde2.objectinspector.primitive.DateObjectInspector;
-import org.apache.hadoop.hive.serde2.objectinspector.primitive.DoubleObjectInspector;
-import org.apache.hadoop.hive.serde2.objectinspector.primitive.HiveDecimalObjectInspector;
-import org.apache.hadoop.hive.serde2.objectinspector.primitive.LongObjectInspector;
-import org.apache.hadoop.hive.serde2.objectinspector.primitive.StringObjectInspector;
-import org.apache.hadoop.mapred.JobConf;
-import org.apache.hadoop.util.StringUtils;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * ColumnStatsTask implementation.
- **/
-
-public class ColumnStatsTask extends Task<ColumnStatsWork> implements Serializable {
-  private static final long serialVersionUID = 1L;
-  private FetchOperator ftOp;
-  private static transient final Logger LOG = LoggerFactory.getLogger(ColumnStatsTask.class);
-
-  public ColumnStatsTask() {
-    super();
-  }
-
-  @Override
-  public void initialize(QueryState queryState, QueryPlan queryPlan, DriverContext ctx,
-      CompilationOpContext opContext) {
-    super.initialize(queryState, queryPlan, ctx, opContext);
-    work.initializeForFetch(opContext);
-    try {
-      JobConf job = new JobConf(conf);
-      ftOp = new FetchOperator(work.getfWork(), job);
-    } catch (Exception e) {
-      LOG.error(StringUtils.stringifyException(e));
-      throw new RuntimeException(e);
-    }
-  }
-
-  private void unpackBooleanStats(ObjectInspector oi, Object o, String fName,
-      ColumnStatisticsObj statsObj) {
-    long v = ((LongObjectInspector) oi).get(o);
-    if (fName.equals("counttrues")) {
-      statsObj.getStatsData().getBooleanStats().setNumTrues(v);
-    } else if (fName.equals("countfalses")) {
-      statsObj.getStatsData().getBooleanStats().setNumFalses(v);
-    } else if (fName.equals("countnulls")) {
-      statsObj.getStatsData().getBooleanStats().setNumNulls(v);
-    }
-  }
-
-  @SuppressWarnings("serial")
-  class UnsupportedDoubleException extends Exception {
-  }
-
-  private void unpackDoubleStats(ObjectInspector oi, Object o, String fName,
-      ColumnStatisticsObj statsObj) throws UnsupportedDoubleException {
-    if (fName.equals("countnulls")) {
-      long v = ((LongObjectInspector) oi).get(o);
-      statsObj.getStatsData().getDoubleStats().setNumNulls(v);
-    } else if (fName.equals("numdistinctvalues")) {
-      long v = ((LongObjectInspector) oi).get(o);
-      statsObj.getStatsData().getDoubleStats().setNumDVs(v);
-    } else if (fName.equals("max")) {
-      double d = ((DoubleObjectInspector) oi).get(o);
-      if (Double.isInfinite(d) || Double.isNaN(d)) {
-        throw new UnsupportedDoubleException();
-      }
-      statsObj.getStatsData().getDoubleStats().setHighValue(d);
-    } else if (fName.equals("min")) {
-      double d = ((DoubleObjectInspector) oi).get(o);
-      if (Double.isInfinite(d) || Double.isNaN(d)) {
-        throw new UnsupportedDoubleException();
-      }
-      statsObj.getStatsData().getDoubleStats().setLowValue(d);
-    } else if (fName.equals("ndvbitvector")) {
-      PrimitiveObjectInspector poi = (PrimitiveObjectInspector) oi;
-      byte[] buf = ((BinaryObjectInspector) poi).getPrimitiveJavaObject(o);
-      statsObj.getStatsData().getDoubleStats().setBitVectors(buf);
-    }
-  }
-
-  private void unpackDecimalStats(ObjectInspector oi, Object o, String fName,
-      ColumnStatisticsObj statsObj) {
-    if (fName.equals("countnulls")) {
-      long v = ((LongObjectInspector) oi).get(o);
-      statsObj.getStatsData().getDecimalStats().setNumNulls(v);
-    } else if (fName.equals("numdistinctvalues")) {
-      long v = ((LongObjectInspector) oi).get(o);
-      statsObj.getStatsData().getDecimalStats().setNumDVs(v);
-    } else if (fName.equals("max")) {
-      HiveDecimal d = ((HiveDecimalObjectInspector) oi).getPrimitiveJavaObject(o);
-      statsObj.getStatsData().getDecimalStats().setHighValue(convertToThriftDecimal(d));
-    } else if (fName.equals("min")) {
-      HiveDecimal d = ((HiveDecimalObjectInspector) oi).getPrimitiveJavaObject(o);
-      statsObj.getStatsData().getDecimalStats().setLowValue(convertToThriftDecimal(d));
-    } else if (fName.equals("ndvbitvector")) {
-      PrimitiveObjectInspector poi = (PrimitiveObjectInspector) oi;
-      byte[] buf = ((BinaryObjectInspector) poi).getPrimitiveJavaObject(o);
-      statsObj.getStatsData().getDecimalStats().setBitVectors(buf);
-    }
-  }
-
-  private Decimal convertToThriftDecimal(HiveDecimal d) {
-    return new Decimal(ByteBuffer.wrap(d.unscaledValue().toByteArray()), (short)d.scale());
-  }
-
-  private void unpackLongStats(ObjectInspector oi, Object o, String fName,
-      ColumnStatisticsObj statsObj) {
-    if (fName.equals("countnulls")) {
-      long v = ((LongObjectInspector) oi).get(o);
-      statsObj.getStatsData().getLongStats().setNumNulls(v);
-    } else if (fName.equals("numdistinctvalues")) {
-      long v = ((LongObjectInspector) oi).get(o);
-      statsObj.getStatsData().getLongStats().setNumDVs(v);
-    } else if (fName.equals("max")) {
-      long v = ((LongObjectInspector) oi).get(o);
-      statsObj.getStatsData().getLongStats().setHighValue(v);
-    } else if (fName.equals("min")) {
-      long  v = ((LongObjectInspector) oi).get(o);
-      statsObj.getStatsData().getLongStats().setLowValue(v);
-    } else if (fName.equals("ndvbitvector")) {
-      PrimitiveObjectInspector poi = (PrimitiveObjectInspector) oi;
-      byte[] buf = ((BinaryObjectInspector) poi).getPrimitiveJavaObject(o);
-      statsObj.getStatsData().getLongStats().setBitVectors(buf);
-    }
-  }
-
-  private void unpackStringStats(ObjectInspector oi, Object o, String fName,
-      ColumnStatisticsObj statsObj) {
-    if (fName.equals("countnulls")) {
-      long v = ((LongObjectInspector) oi).get(o);
-      statsObj.getStatsData().getStringStats().setNumNulls(v);
-    } else if (fName.equals("numdistinctvalues")) {
-      long v = ((LongObjectInspector) oi).get(o);
-      statsObj.getStatsData().getStringStats().setNumDVs(v);
-    } else if (fName.equals("avglength")) {
-      double d = ((DoubleObjectInspector) oi).get(o);
-      statsObj.getStatsData().getStringStats().setAvgColLen(d);
-    } else if (fName.equals("maxlength")) {
-      long v = ((LongObjectInspector) oi).get(o);
-      statsObj.getStatsData().getStringStats().setMaxColLen(v);
-    } else if (fName.equals("ndvbitvector")) {
-      PrimitiveObjectInspector poi = (PrimitiveObjectInspector) oi;
-      byte[] buf = ((BinaryObjectInspector) poi).getPrimitiveJavaObject(o);
-      statsObj.getStatsData().getStringStats().setBitVectors(buf);
-    }
-  }
-
-  private void unpackBinaryStats(ObjectInspector oi, Object o, String fName,
-      ColumnStatisticsObj statsObj) {
-    if (fName.equals("countnulls")) {
-      long v = ((LongObjectInspector) oi).get(o);
-      statsObj.getStatsData().getBinaryStats().setNumNulls(v);
-    } else if (fName.equals("avglength")) {
-      double d = ((DoubleObjectInspector) oi).get(o);
-      statsObj.getStatsData().getBinaryStats().setAvgColLen(d);
-    } else if (fName.equals("maxlength")) {
-      long v = ((LongObjectInspector) oi).get(o);
-      statsObj.getStatsData().getBinaryStats().setMaxColLen(v);
-    }
-  }
-
-  private void unpackDateStats(ObjectInspector oi, Object o, String fName,
-      ColumnStatisticsObj statsObj) {
-    if (fName.equals("countnulls")) {
-      long v = ((LongObjectInspector) oi).get(o);
-      statsObj.getStatsData().getDateStats().setNumNulls(v);
-    } else if (fName.equals("numdistinctvalues")) {
-      long v = ((LongObjectInspector) oi).get(o);
-      statsObj.getStatsData().getDateStats().setNumDVs(v);
-    } else if (fName.equals("max")) {
-      DateWritable v = ((DateObjectInspector) oi).getPrimitiveWritableObject(o);
-      statsObj.getStatsData().getDateStats().setHighValue(new Date(v.getDays()));
-    } else if (fName.equals("min")) {
-      DateWritable v = ((DateObjectInspector) oi).getPrimitiveWritableObject(o);
-      statsObj.getStatsData().getDateStats().setLowValue(new Date(v.getDays()));
-    } else if (fName.equals("ndvbitvector")) {
-      PrimitiveObjectInspector poi = (PrimitiveObjectInspector) oi;
-      byte[] buf = ((BinaryObjectInspector) poi).getPrimitiveJavaObject(o);
-      statsObj.getStatsData().getDateStats().setBitVectors(buf);
-    }
-  }
-
-  private void unpackPrimitiveObject (ObjectInspector oi, Object o, String fieldName,
-      ColumnStatisticsObj statsObj) throws UnsupportedDoubleException {
-    if (o == null) {
-      return;
-    }
-    // First infer the type of object
-    if (fieldName.equals("columntype")) {
-      PrimitiveObjectInspector poi = (PrimitiveObjectInspector) oi;
-      String s = ((StringObjectInspector) poi).getPrimitiveJavaObject(o);
-      ColumnStatisticsData statsData = new ColumnStatisticsData();
-
-      if (s.equalsIgnoreCase("long")) {
-        LongColumnStatsDataInspector longStats = new LongColumnStatsDataInspector();
-        statsData.setLongStats(longStats);
-        statsObj.setStatsData(statsData);
-      } else if (s.equalsIgnoreCase("double")) {
-        DoubleColumnStatsDataInspector doubleStats = new DoubleColumnStatsDataInspector();
-        statsData.setDoubleStats(doubleStats);
-        statsObj.setStatsData(statsData);
-      } else if (s.equalsIgnoreCase("string")) {
-        StringColumnStatsDataInspector stringStats = new StringColumnStatsDataInspector();
-        statsData.setStringStats(stringStats);
-        statsObj.setStatsData(statsData);
-      } else if (s.equalsIgnoreCase("boolean")) {
-        BooleanColumnStatsData booleanStats = new BooleanColumnStatsData();
-        statsData.setBooleanStats(booleanStats);
-        statsObj.setStatsData(statsData);
-      } else if (s.equalsIgnoreCase("binary")) {
-        BinaryColumnStatsData binaryStats = new BinaryColumnStatsData();
-        statsData.setBinaryStats(binaryStats);
-        statsObj.setStatsData(statsData);
-      } else if (s.equalsIgnoreCase("decimal")) {
-        DecimalColumnStatsDataInspector decimalStats = new DecimalColumnStatsDataInspector();
-        statsData.setDecimalStats(decimalStats);
-        statsObj.setStatsData(statsData);
-      } else if (s.equalsIgnoreCase("date")) {
-        DateColumnStatsDataInspector dateStats = new DateColumnStatsDataInspector();
-        statsData.setDateStats(dateStats);
-        statsObj.setStatsData(statsData);
-      }
-    } else {
-      // invoke the right unpack method depending on data type of the column
-      if (statsObj.getStatsData().isSetBooleanStats()) {
-        unpackBooleanStats(oi, o, fieldName, statsObj);
-      } else if (statsObj.getStatsData().isSetLongStats()) {
-        unpackLongStats(oi, o, fieldName, statsObj);
-      } else if (statsObj.getStatsData().isSetDoubleStats()) {
-        unpackDoubleStats(oi,o,fieldName, statsObj);
-      } else if (statsObj.getStatsData().isSetStringStats()) {
-        unpackStringStats(oi, o, fieldName, statsObj);
-      } else if (statsObj.getStatsData().isSetBinaryStats()) {
-        unpackBinaryStats(oi, o, fieldName, statsObj);
-      } else if (statsObj.getStatsData().isSetDecimalStats()) {
-        unpackDecimalStats(oi, o, fieldName, statsObj);
-      } else if (statsObj.getStatsData().isSetDateStats()) {
-        unpackDateStats(oi, o, fieldName, statsObj);
-      }
-    }
-  }
-
-  private void unpackStructObject(ObjectInspector oi, Object o, String fName,
-      ColumnStatisticsObj cStatsObj) throws UnsupportedDoubleException {
-    if (oi.getCategory() != ObjectInspector.Category.STRUCT) {
-      throw new RuntimeException("Invalid object datatype : " + oi.getCategory().toString());
-    }
-
-    StructObjectInspector soi = (StructObjectInspector) oi;
-    List<? extends StructField> fields = soi.getAllStructFieldRefs();
-    List<Object> list = soi.getStructFieldsDataAsList(o);
-
-    for (int i = 0; i < fields.size(); i++) {
-      // Get the field objectInspector, fieldName and the field object.
-      ObjectInspector foi = fields.get(i).getFieldObjectInspector();
-      Object f = (list == null ? null : list.get(i));
-      String fieldName = fields.get(i).getFieldName();
-
-      if (foi.getCategory() == ObjectInspector.Category.PRIMITIVE) {
-        unpackPrimitiveObject(foi, f, fieldName, cStatsObj);
-      } else {
-        unpackStructObject(foi, f, fieldName, cStatsObj);
-      }
-    }
-  }
-
-  private List<ColumnStatistics> constructColumnStatsFromPackedRows(
-      Hive db) throws HiveException, MetaException, IOException {
-
-    String currentDb = work.getCurrentDatabaseName();
-    String tableName = work.getColStats().getTableName();
-    String partName = null;
-    List<String> colName = work.getColStats().getColName();
-    List<String> colType = work.getColStats().getColType();
-    boolean isTblLevel = work.getColStats().isTblLevel();
-
-    List<ColumnStatistics> stats = new ArrayList<ColumnStatistics>();
-    InspectableObject packedRow;
-    Table tbl = db.getTable(currentDb, tableName);
-    while ((packedRow = ftOp.getNextRow()) != null) {
-      if (packedRow.oi.getCategory() != ObjectInspector.Category.STRUCT) {
-        throw new HiveException("Unexpected object type encountered while unpacking row");
-      }
-
-      List<ColumnStatisticsObj> statsObjs = new ArrayList<ColumnStatisticsObj>();
-      StructObjectInspector soi = (StructObjectInspector) packedRow.oi;
-      List<? extends StructField> fields = soi.getAllStructFieldRefs();
-      List<Object> list = soi.getStructFieldsDataAsList(packedRow.o);
-
-      List<FieldSchema> partColSchema = tbl.getPartCols();
-      // Partition columns are appended at the end; we only care about the stats columns
-      int numOfStatCols = isTblLevel ? fields.size() : fields.size() - partColSchema.size();
-      for (int i = 0; i < numOfStatCols; i++) {
-        // Get the field objectInspector, fieldName and the field object.
-        ObjectInspector foi = fields.get(i).getFieldObjectInspector();
-        Object f = (list == null ? null : list.get(i));
-        String fieldName = fields.get(i).getFieldName();
-        ColumnStatisticsObj statsObj = new ColumnStatisticsObj();
-        statsObj.setColName(colName.get(i));
-        statsObj.setColType(colType.get(i));
-        try {
-          unpackStructObject(foi, f, fieldName, statsObj);
-          statsObjs.add(statsObj);
-        } catch (UnsupportedDoubleException e) {
-          // min/max was infinite or NaN; skip this column's stats.
-          LOG.info("Because {} is infinite or NaN, we skip stats.", colName.get(i));
-        }
-      }
-
-      if (!isTblLevel) {
-        List<String> partVals = new ArrayList<String>();
-        // Iterate over partition columns to figure out partition name
-        for (int i = fields.size() - partColSchema.size(); i < fields.size(); i++) {
-          Object partVal = ((PrimitiveObjectInspector)fields.get(i).getFieldObjectInspector()).
-              getPrimitiveJavaObject(list.get(i));
-          partVals.add(partVal == null ? // could be null for default partition
-            this.conf.getVar(ConfVars.DEFAULTPARTITIONNAME) : partVal.toString());
-        }
-        partName = Warehouse.makePartName(partColSchema, partVals);
-      }
-      String [] names = Utilities.getDbTableName(currentDb, tableName);
-      ColumnStatisticsDesc statsDesc = getColumnStatsDesc(names[0], names[1], partName, isTblLevel);
-      ColumnStatistics colStats = new ColumnStatistics();
-      colStats.setStatsDesc(statsDesc);
-      colStats.setStatsObj(statsObjs);
-      if (!statsObjs.isEmpty()) {
-        stats.add(colStats);
-      }
-    }
-    ftOp.clearFetchContext();
-    return stats;
-  }
-
-  private ColumnStatisticsDesc getColumnStatsDesc(String dbName, String tableName,
-      String partName, boolean isTblLevel)
-  {
-    ColumnStatisticsDesc statsDesc = new ColumnStatisticsDesc();
-    statsDesc.setDbName(dbName);
-    statsDesc.setTableName(tableName);
-    statsDesc.setIsTblLevel(isTblLevel);
-
-    if (!isTblLevel) {
-      statsDesc.setPartName(partName);
-    } else {
-      statsDesc.setPartName(null);
-    }
-    return statsDesc;
-  }
-
-  private int persistColumnStats(Hive db) throws HiveException, MetaException, IOException {
-    // Construct a column statistics object from the result
-    List<ColumnStatistics> colStats = constructColumnStatsFromPackedRows(db);
-    // Persist the column statistics object to the metastore
-    // Note, this function is shared for both table and partition column stats.
-    if (colStats.isEmpty()) {
-      return 0;
-    }
-    SetPartitionsStatsRequest request = new SetPartitionsStatsRequest(colStats);
-    if (work.getColStats() != null && work.getColStats().getNumBitVector() > 0) {
-      request.setNeedMerge(true);
-    }
-    db.setPartitionColumnStatistics(request);
-    return 0;
-  }
-
-  @Override
-  public int execute(DriverContext driverContext) {
-    if (driverContext.getCtx().getExplainAnalyze() == AnalyzeState.RUNNING) {
-      return 0;
-    }
-    try {
-      Hive db = getHive();
-      return persistColumnStats(db);
-    } catch (Exception e) {
-      LOG.error("Failed to run column stats task", e);
-    }
-    return 1;
-  }
-
-  @Override
-  public StageType getType() {
-    return StageType.COLUMNSTATS;
-  }
-
-  @Override
-  public String getName() {
-    return "COLUMNSTATS TASK";
-  }
-}
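
For reference, the deleted task's control flow was: fetch the "packed rows" produced by the column-stats aggregation query, dispatch on column type to fill one ColumnStatisticsObj per column, wrap the objects in a ColumnStatistics with a table- or partition-level ColumnStatisticsDesc, and persist everything through a single SetPartitionsStatsRequest. A compressed sketch of that persistence path, using the same thrift calls the deleted code used (the table, column, and values are made up for illustration):

    import java.util.Arrays;
    import java.util.List;
    import org.apache.hadoop.hive.metastore.api.ColumnStatistics;
    import org.apache.hadoop.hive.metastore.api.ColumnStatisticsData;
    import org.apache.hadoop.hive.metastore.api.ColumnStatisticsDesc;
    import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;
    import org.apache.hadoop.hive.metastore.api.LongColumnStatsData;
    import org.apache.hadoop.hive.metastore.api.SetPartitionsStatsRequest;
    import org.apache.hadoop.hive.ql.metadata.Hive;
    import org.apache.hadoop.hive.ql.metadata.HiveException;

    public class PersistColStatsSketch {
      static void persist(Hive db) throws HiveException {
        // One stats object per column; a long column here as an example.
        ColumnStatisticsObj obj = new ColumnStatisticsObj();
        obj.setColName("key");      // hypothetical column
        obj.setColType("bigint");
        LongColumnStatsData longStats = new LongColumnStatsData();
        longStats.setNumNulls(0);
        longStats.setNumDVs(309);
        longStats.setLowValue(0);
        longStats.setHighValue(498);
        ColumnStatisticsData data = new ColumnStatisticsData();
        data.setLongStats(longStats);
        obj.setStatsData(data);

        // Descriptor: table-level here; partition stats set a partName instead.
        ColumnStatisticsDesc desc = new ColumnStatisticsDesc();
        desc.setDbName("default");  // hypothetical table
        desc.setTableName("src_a");
        desc.setIsTblLevel(true);

        ColumnStatistics colStats = new ColumnStatistics();
        colStats.setStatsDesc(desc);
        colStats.setStatsObj(Arrays.asList(obj));

        List<ColumnStatistics> all = Arrays.asList(colStats);
        // setNeedMerge(true) would be added here when NDV bit vectors are present.
        db.setPartitionColumnStatistics(new SetPartitionsStatsRequest(all));
      }
    }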

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java
index 2331498..b4989f1 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java
@@ -52,7 +52,6 @@ import org.apache.hadoop.hive.ql.io.HiveFileFormatUtils;
 import org.apache.hadoop.hive.ql.io.HiveKey;
 import org.apache.hadoop.hive.ql.io.HiveOutputFormat;
 import org.apache.hadoop.hive.ql.io.HivePartitioner;
-import org.apache.hadoop.hive.ql.io.RecordIdentifier;
 import org.apache.hadoop.hive.ql.io.RecordUpdater;
 import org.apache.hadoop.hive.ql.io.StatsProvidingRecordWriter;
 import org.apache.hadoop.hive.ql.io.StreamingOutputFormat;
@@ -975,7 +974,7 @@ public class FileSinkOperator extends TerminalOperator<FileSinkDesc> implements
         Object recId = ((StructObjectInspector)rowInspector).getStructFieldData(row, recIdField);
         int bucketProperty =
             bucketInspector.get(recIdInspector.getStructFieldData(recId, bucketField));
-        int bucketNum = 
+        int bucketNum =
           BucketCodec.determineVersion(bucketProperty).decodeWriterId(bucketProperty);
         writerOffset = 0;
         if (multiFileSpray) {
@@ -1452,7 +1451,7 @@ public class FileSinkOperator extends TerminalOperator<FileSinkDesc> implements
     }
 
     StatsCollectionContext sContext = new StatsCollectionContext(hconf);
-    sContext.setStatsTmpDir(conf.getStatsTmpDir());
+    sContext.setStatsTmpDir(conf.getTmpStatsDir());
     if (!statsPublisher.connect(sContext)) {
       // just return, stats gathering should not block the main query
       LOG.error("StatsPublishing error: cannot connect to database");

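The FileSinkOperator hunks are housekeeping riding along with the merge: an unused RecordIdentifier import and some trailing whitespace go away, and the stats temp-dir accessor is renamed (conf.getStatsTmpDir() becomes conf.getTmpStatsDir() on the FileSinkDesc). The surrounding context is still worth a note: a failed publisher connection is logged and then ignored, because stats gathering must never fail the main query. A minimal sketch of that connect-and-continue contract, assuming the standard StatsPublisher interface (the publisher instance, key prefix, temp dir, and values below are illustrative):

    import java.util.HashMap;
    import java.util.Map;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hive.common.StatsSetupConst;
    import org.apache.hadoop.hive.ql.stats.StatsCollectionContext;
    import org.apache.hadoop.hive.ql.stats.StatsPublisher;

    public class PublishStatsSketch {
      static void publish(StatsPublisher publisher, Configuration conf) {
        StatsCollectionContext ctx = new StatsCollectionContext(conf);
        ctx.setStatsTmpDir("/tmp/hive-stats"); // illustrative; the operator passes its FileSinkDesc dir
        if (!publisher.connect(ctx)) {
          // Mirror FileSinkOperator: report and carry on, never fail the query over stats.
          System.err.println("StatsPublishing error: cannot connect to database");
          return;
        }
        Map<String, String> stats = new HashMap<>();
        stats.put(StatsSetupConst.ROW_COUNT, "500"); // made-up value
        publisher.publishStat("fileSinkPrefix", stats); // prefix identifies this writer
        publisher.closeConnection(ctx);
      }
    }
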
http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/java/org/apache/hadoop/hive/ql/exec/StatsNoJobTask.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/StatsNoJobTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/StatsNoJobTask.java
deleted file mode 100644
index c333c49..0000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/StatsNoJobTask.java
+++ /dev/null
@@ -1,396 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.ql.exec;
-
-import java.io.Serializable;
-import java.util.Collection;
-import java.util.List;
-import java.util.Map;
-import java.util.concurrent.ConcurrentMap;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Executors;
-import java.util.concurrent.TimeUnit;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hive.common.HiveStatsUtils;
-import org.apache.hadoop.hive.common.StatsSetupConst;
-import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
-import org.apache.hadoop.hive.metastore.api.EnvironmentContext;
-import org.apache.hadoop.hive.metastore.api.InvalidOperationException;
-import org.apache.hadoop.hive.ql.CompilationOpContext;
-import org.apache.hadoop.hive.ql.DriverContext;
-import org.apache.hadoop.hive.ql.QueryPlan;
-import org.apache.hadoop.hive.ql.QueryState;
-import org.apache.hadoop.hive.ql.io.StatsProvidingRecordReader;
-import org.apache.hadoop.hive.ql.metadata.Hive;
-import org.apache.hadoop.hive.ql.metadata.HiveException;
-import org.apache.hadoop.hive.ql.metadata.Partition;
-import org.apache.hadoop.hive.ql.metadata.Table;
-import org.apache.hadoop.hive.ql.parse.BaseSemanticAnalyzer.TableSpec;
-import org.apache.hadoop.hive.ql.plan.StatsNoJobWork;
-import org.apache.hadoop.hive.ql.plan.api.StageType;
-import org.apache.hadoop.mapred.FileSplit;
-import org.apache.hadoop.mapred.InputFormat;
-import org.apache.hadoop.mapred.InputSplit;
-import org.apache.hadoop.mapred.JobConf;
-import org.apache.hadoop.mapred.Reporter;
-import org.apache.hadoop.util.StringUtils;
-import org.apache.hive.common.util.ReflectionUtil;
-
-import com.google.common.collect.Lists;
-import com.google.common.collect.MapMaker;
-import com.google.common.util.concurrent.ThreadFactoryBuilder;
-
-/**
- * StatsNoJobTask is used in cases where stats collection is the only task for the given query (no
- * parent MR or Tez job). It is used for ANALYZE TABLE ... COMPUTE STATISTICS NOSCAN on
- * file formats that implement the StatsProvidingRecordReader interface: the ORC format
- * (which implements StatsProvidingRecordReader) stores statistics for all columns in the
- * file footer, so it is much faster to compute table/partition statistics by reading the
- * footer than by scanning all the rows. This task computes basic stats such as numFiles,
- * numRows, fileSize and rawDataSize from the ORC footer.
- **/
-public class StatsNoJobTask extends Task<StatsNoJobWork> implements Serializable {
-
-  private static final long serialVersionUID = 1L;
-  private static transient final Logger LOG = LoggerFactory.getLogger(StatsNoJobTask.class);
-  private ConcurrentMap<String, Partition> partUpdates;
-  private Table table;
-  private String tableFullName;
-  private JobConf jc = null;
-
-  public StatsNoJobTask() {
-    super();
-  }
-
-  @Override
-  public void initialize(QueryState queryState, QueryPlan queryPlan, DriverContext driverContext,
-      CompilationOpContext opContext) {
-    super.initialize(queryState, queryPlan, driverContext, opContext);
-    jc = new JobConf(conf);
-  }
-
-  @Override
-  public int execute(DriverContext driverContext) {
-
-    LOG.info("Executing stats (no job) task");
-
-    String tableName = "";
-    ExecutorService threadPool = null;
-    Hive db = getHive();
-    try {
-      tableName = work.getTableSpecs().tableName;
-      table = db.getTable(tableName);
-      int numThreads = HiveConf.getIntVar(conf, ConfVars.HIVE_STATS_GATHER_NUM_THREADS);
-      tableFullName = table.getFullyQualifiedName();
-      threadPool = Executors.newFixedThreadPool(numThreads,
-          new ThreadFactoryBuilder().setDaemon(true).setNameFormat("StatsNoJobTask-Thread-%d")
-              .build());
-      partUpdates = new MapMaker().concurrencyLevel(numThreads).makeMap();
-      LOG.info("Initialized threadpool for stats computation with {} threads", numThreads);
-    } catch (HiveException e) {
-      LOG.error("Cannot get table {}", tableName, e);
-      console.printError("Cannot get table " + tableName, e.toString());
-    }
-
-    return aggregateStats(threadPool, db);
-  }
-
-  @Override
-  public StageType getType() {
-    return StageType.STATS;
-  }
-
-  @Override
-  public String getName() {
-    return "STATS-NO-JOB";
-  }
-
-  class StatsCollection implements Runnable {
-
-    private final Partition partn;
-
-    public StatsCollection(Partition part) {
-      this.partn = part;
-    }
-
-    @Override
-    public void run() {
-
-      // get the list of partitions
-      org.apache.hadoop.hive.metastore.api.Partition tPart = partn.getTPartition();
-      Map<String, String> parameters = tPart.getParameters();
-
-      try {
-        Path dir = new Path(tPart.getSd().getLocation());
-        long numRows = 0;
-        long rawDataSize = 0;
-        long fileSize = 0;
-        long numFiles = 0;
-        FileSystem fs = dir.getFileSystem(conf);
-        FileStatus[] fileList = HiveStatsUtils.getFileStatusRecurse(dir, -1, fs);
-
-        boolean statsAvailable = false;
-        for(FileStatus file: fileList) {
-          if (!file.isDir()) {
-            InputFormat<?, ?> inputFormat = ReflectionUtil.newInstance(
-                partn.getInputFormatClass(), jc);
-            InputSplit dummySplit = new FileSplit(file.getPath(), 0, 0,
-                new String[] { partn.getLocation() });
-            org.apache.hadoop.mapred.RecordReader<?, ?> recordReader =
-                inputFormat.getRecordReader(dummySplit, jc, Reporter.NULL);
-            StatsProvidingRecordReader statsRR;
-            if (recordReader instanceof StatsProvidingRecordReader) {
-              statsRR = (StatsProvidingRecordReader) recordReader;
-              rawDataSize += statsRR.getStats().getRawDataSize();
-              numRows += statsRR.getStats().getRowCount();
-              fileSize += file.getLen();
-              numFiles += 1;
-              statsAvailable = true;
-            }
-            recordReader.close();
-          }
-        }
-
-        if (statsAvailable) {
-          parameters.put(StatsSetupConst.ROW_COUNT, String.valueOf(numRows));
-          parameters.put(StatsSetupConst.RAW_DATA_SIZE, String.valueOf(rawDataSize));
-          parameters.put(StatsSetupConst.TOTAL_SIZE, String.valueOf(fileSize));
-          parameters.put(StatsSetupConst.NUM_FILES, String.valueOf(numFiles));
-
-          partUpdates.put(tPart.getSd().getLocation(), new Partition(table, tPart));
-
-          // printout console and debug logs
-          String threadName = Thread.currentThread().getName();
-          String msg = "Partition " + tableFullName + partn.getSpec() + " stats: ["
-              + toString(parameters) + ']';
-          LOG.debug("{}: {}", threadName, msg);
-          console.printInfo(msg);
-        } else {
-          String threadName = Thread.currentThread().getName();
-          String msg = "Partition " + tableFullName + partn.getSpec() + " does not provide stats.";
-          LOG.debug("{}: {}", threadName, msg);
-        }
-      } catch (Exception e) {
-        console.printInfo("[Warning] could not update stats for " + tableFullName + partn.getSpec()
-            + ".",
-            "Failed with exception " + e.getMessage() + "\n" + StringUtils.stringifyException(e));
-
-        // Before updating the partition params, if any partition params is null
-        // and if statsReliable is true then updatePartition() function  will fail
-        // the task by returning 1
-        if (work.isStatsReliable()) {
-          partUpdates.put(tPart.getSd().getLocation(), null);
-        }
-      }
-    }
-
-    private String toString(Map<String, String> parameters) {
-      StringBuilder builder = new StringBuilder();
-      for (String statType : StatsSetupConst.supportedStats) {
-        String value = parameters.get(statType);
-        if (value != null) {
-          if (builder.length() > 0) {
-            builder.append(", ");
-          }
-          builder.append(statType).append('=').append(value);
-        }
-      }
-      return builder.toString();
-    }
-
-  }
-
-  private int aggregateStats(ExecutorService threadPool, Hive db) {
-    int ret = 0;
-
-    try {
-      Collection<Partition> partitions = null;
-      if (work.getPrunedPartitionList() == null) {
-        partitions = getPartitionsList();
-      } else {
-        partitions = work.getPrunedPartitionList().getPartitions();
-      }
-
-      // non-partitioned table
-      if (partitions == null) {
-        org.apache.hadoop.hive.metastore.api.Table tTable = table.getTTable();
-        Map<String, String> parameters = tTable.getParameters();
-        try {
-          Path dir = new Path(tTable.getSd().getLocation());
-          LOG.debug("Aggregating stats for " + dir);
-          long numRows = 0;
-          long rawDataSize = 0;
-          long fileSize = 0;
-          long numFiles = 0;
-          FileSystem fs = dir.getFileSystem(conf);
-          FileStatus[] fileList = HiveStatsUtils.getFileStatusRecurse(dir, -1, fs);
-
-          boolean statsAvailable = false;
-          for(FileStatus file: fileList) {
-            LOG.debug("Computing stats for " + file);
-            if (!file.isDir()) {
-              InputFormat<?, ?> inputFormat = ReflectionUtil.newInstance(
-                  table.getInputFormatClass(), jc);
-              InputSplit dummySplit = new FileSplit(file.getPath(), 0, 0, new String[] { table
-                  .getDataLocation().toString() });
-              if (file.getLen() == 0) {
-                numFiles += 1;
-                statsAvailable = true;
-              } else {
-                org.apache.hadoop.mapred.RecordReader<?, ?> recordReader =
-                    inputFormat.getRecordReader(dummySplit, jc, Reporter.NULL);
-                StatsProvidingRecordReader statsRR;
-                if (recordReader instanceof StatsProvidingRecordReader) {
-                  statsRR = (StatsProvidingRecordReader) recordReader;
-                  numRows += statsRR.getStats().getRowCount();
-                  rawDataSize += statsRR.getStats().getRawDataSize();
-                  fileSize += file.getLen();
-                  numFiles += 1;
-                  statsAvailable = true;
-                }
-                recordReader.close();
-              }
-            }
-          }
-
-          if (statsAvailable) {
-            parameters.put(StatsSetupConst.ROW_COUNT, String.valueOf(numRows));
-            parameters.put(StatsSetupConst.RAW_DATA_SIZE, String.valueOf(rawDataSize));
-            parameters.put(StatsSetupConst.TOTAL_SIZE, String.valueOf(fileSize));
-            parameters.put(StatsSetupConst.NUM_FILES, String.valueOf(numFiles));
-            EnvironmentContext environmentContext = new EnvironmentContext();
-            environmentContext.putToProperties(StatsSetupConst.STATS_GENERATED, StatsSetupConst.TASK);
-
-            db.alterTable(table, environmentContext);
-
-            String msg = "Table " + tableFullName + " stats: [" + toString(parameters) + ']';
-            if (Utilities.FILE_OP_LOGGER.isTraceEnabled()) {
-              Utilities.FILE_OP_LOGGER.trace(msg);
-            }
-            console.printInfo(msg);
-            LOG.debug("Table {} does not provide stats.", tableFullName);
-          }
-        } catch (Exception e) {
-          console.printInfo("[Warning] could not update stats for " + tableFullName + ".",
-              "Failed with exception " + e.getMessage() + "\n" + StringUtils.stringifyException(e));
-        }
-      } else {
-
-        // Partitioned table
-        for (Partition partn : partitions) {
-          threadPool.execute(new StatsCollection(partn));
-        }
-
-        LOG.debug("Stats collection waiting for threadpool to shutdown..");
-        shutdownAndAwaitTermination(threadPool);
-        LOG.debug("Stats collection threadpool shutdown successful.");
-
-        ret = updatePartitions(db);
-      }
-
-    } catch (Exception e) {
-      // Fail the query if the stats are supposed to be reliable
-      if (work.isStatsReliable()) {
-        ret = -1;
-      }
-    }
-
-    // The return value of 0 indicates success,
-    // anything else indicates failure
-    return ret;
-  }
-
-  private int updatePartitions(Hive db) throws InvalidOperationException, HiveException {
-    if (!partUpdates.isEmpty()) {
-      List<Partition> updatedParts = Lists.newArrayList(partUpdates.values());
-      if (updatedParts.contains(null) && work.isStatsReliable()) {
-        LOG.debug("Stats requested to be reliable. Empty stats found and hence failing the task.");
-        return -1;
-      } else {
-        LOG.debug("Bulk updating partitions..");
-        EnvironmentContext environmentContext = new EnvironmentContext();
-        environmentContext.putToProperties(StatsSetupConst.STATS_GENERATED, StatsSetupConst.TASK);
-        db.alterPartitions(tableFullName, Lists.newArrayList(partUpdates.values()),
-            environmentContext);
-        LOG.debug("Bulk updated {} partitions.", partUpdates.values().size());
-      }
-    }
-    return 0;
-  }
-
-  private void shutdownAndAwaitTermination(ExecutorService threadPool) {
-
-    // Disable new tasks from being submitted
-    threadPool.shutdown();
-    try {
-
-      // Wait a while for existing tasks to terminate
-      while (!threadPool.awaitTermination(10, TimeUnit.SECONDS)) {
-        LOG.debug("Waiting for all stats tasks to finish...");
-      }
-      // Cancel currently executing tasks
-      threadPool.shutdownNow();
-
-      // Wait a while for tasks to respond to being cancelled
-      if (!threadPool.awaitTermination(100, TimeUnit.SECONDS)) {
-        LOG.debug("Stats collection thread pool did not terminate");
-      }
-    } catch (InterruptedException ie) {
-
-      // Cancel again if current thread also interrupted
-      threadPool.shutdownNow();
-
-      // Preserve interrupt status
-      Thread.currentThread().interrupt();
-    }
-  }
-
-  private String toString(Map<String, String> parameters) {
-    StringBuilder builder = new StringBuilder();
-    for (String statType : StatsSetupConst.supportedStats) {
-      String value = parameters.get(statType);
-      if (value != null) {
-        if (builder.length() > 0) {
-          builder.append(", ");
-        }
-        builder.append(statType).append('=').append(value);
-      }
-    }
-    return builder.toString();
-  }
-
-  private List<Partition> getPartitionsList() throws HiveException {
-    if (work.getTableSpecs() != null) {
-      TableSpec tblSpec = work.getTableSpecs();
-      table = tblSpec.tableHandle;
-      if (!table.isPartitioned()) {
-        return null;
-      } else {
-        return tblSpec.partitions;
-      }
-    }
-    return null;
-  }
-}
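
The essential trick of the class removed above is presumably carried over into the new BasicStatsNoJobTask (its body is not part of this excerpt): opening a zero-length split yields a record reader whose footer already carries the row count and raw data size, so basic stats are gathered without reading a single row. A minimal sketch of that pattern, assuming an ORC-style input format whose reader implements StatsProvidingRecordReader (the helper name is illustrative, not from the patch):

import java.io.IOException;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.hive.ql.io.StatsProvidingRecordReader;
import org.apache.hadoop.hive.serde2.SerDeStats;
import org.apache.hadoop.mapred.FileSplit;
import org.apache.hadoop.mapred.InputFormat;
import org.apache.hadoop.mapred.InputSplit;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.RecordReader;
import org.apache.hadoop.mapred.Reporter;

// Returns the footer stats for one file, or null when the format is not
// self-describing and the reader cannot provide them.
static SerDeStats footerStats(FileStatus file, InputFormat<?, ?> inputFormat, JobConf jc)
    throws IOException {
  // A zero-length split is sufficient: only the file footer is consulted.
  InputSplit dummySplit = new FileSplit(file.getPath(), 0, 0, new String[0]);
  RecordReader<?, ?> reader = inputFormat.getRecordReader(dummySplit, jc, Reporter.NULL);
  try {
    return (reader instanceof StatsProvidingRecordReader)
        ? ((StatsProvidingRecordReader) reader).getStats() : null;
  } finally {
    reader.close();
  }
}

Callers then sum getRowCount() and getRawDataSize() across files, plus file.getLen() for TOTAL_SIZE, exactly as the removed run() did.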

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/java/org/apache/hadoop/hive/ql/exec/StatsTask.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/StatsTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/StatsTask.java
index 682b42c..567126e 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/StatsTask.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/StatsTask.java
@@ -16,501 +16,143 @@
  * limitations under the License.
  */
 
-
 package org.apache.hadoop.hive.ql.exec;
 
 import java.io.Serializable;
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.List;
-import java.util.Map;
-import java.util.concurrent.Callable;
-import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Executors;
-import java.util.concurrent.Future;
 
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hive.common.StatsSetupConst;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
-import org.apache.hadoop.hive.metastore.MetaStoreUtils;
-import org.apache.hadoop.hive.metastore.Warehouse;
-import org.apache.hadoop.hive.metastore.api.EnvironmentContext;
-import org.apache.hadoop.hive.metastore.api.MetaException;
-import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
+import org.apache.hadoop.hive.ql.CompilationOpContext;
 import org.apache.hadoop.hive.ql.DriverContext;
-import org.apache.hadoop.hive.ql.ErrorMsg;
-import org.apache.hadoop.hive.ql.io.AcidUtils;
+import org.apache.hadoop.hive.ql.QueryPlan;
+import org.apache.hadoop.hive.ql.QueryState;
 import org.apache.hadoop.hive.ql.metadata.Hive;
 import org.apache.hadoop.hive.ql.metadata.HiveException;
 import org.apache.hadoop.hive.ql.metadata.Partition;
 import org.apache.hadoop.hive.ql.metadata.Table;
-import org.apache.hadoop.hive.ql.parse.BaseSemanticAnalyzer.TableSpec;
 import org.apache.hadoop.hive.ql.parse.ExplainConfiguration.AnalyzeState;
-import org.apache.hadoop.hive.ql.plan.DynamicPartitionCtx;
-import org.apache.hadoop.hive.ql.plan.LoadTableDesc;
-import org.apache.hadoop.hive.ql.plan.LoadTableDesc.LoadFileType;
+import org.apache.hadoop.hive.ql.parse.SemanticException;
 import org.apache.hadoop.hive.ql.plan.StatsWork;
 import org.apache.hadoop.hive.ql.plan.api.StageType;
-import org.apache.hadoop.hive.ql.stats.StatsAggregator;
-import org.apache.hadoop.hive.ql.stats.StatsCollectionContext;
-import org.apache.hadoop.hive.ql.stats.StatsFactory;
-import org.apache.hadoop.hive.ql.stats.StatsPublisher;
-import org.apache.hadoop.hive.ql.stats.StatsUtils;
-import org.apache.hadoop.util.StringUtils;
+import org.apache.hadoop.hive.ql.stats.BasicStatsNoJobTask;
+import org.apache.hadoop.hive.ql.stats.BasicStatsTask;
+import org.apache.hadoop.hive.ql.stats.ColStatsProcessor;
+import org.apache.hadoop.hive.ql.stats.IStatsProcessor;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
-import com.google.common.collect.Lists;
 import com.google.common.util.concurrent.ThreadFactoryBuilder;
 
 /**
- * StatsTask implementation. StatsTask mainly deals with "collectable" stats. These are
- * stats that require data scanning and are collected during query execution (unless the user
- * explicitly requests data scanning just for the purpose of stats computation using the "ANALYZE"
- * command). All other stats are computed directly by the MetaStore. The rationale is that the
- * MetaStore layer covers all Thrift calls and provides better guarantees about the accuracy of
- * those stats.
+ * StatsTask implementation.
  **/
-public class StatsTask extends Task<StatsWork> implements Serializable {
 
+public class StatsTask extends Task<StatsWork> implements Serializable {
   private static final long serialVersionUID = 1L;
   private static transient final Logger LOG = LoggerFactory.getLogger(StatsTask.class);
 
-  private Table table;
-  private Collection<Partition> dpPartSpecs;
-
   public StatsTask() {
     super();
-    dpPartSpecs = null;
   }
 
-  @Override
-  protected void receiveFeed(FeedType feedType, Object feedValue) {
-    // this method should be called by MoveTask when there are dynamic partitions generated
-    if (feedType == FeedType.DYNAMIC_PARTITIONS) {
-      dpPartSpecs = (Collection<Partition>) feedValue;
-    }
-  }
+  List<IStatsProcessor> processors = new ArrayList<>();
 
   @Override
-  public int execute(DriverContext driverContext) {
-    if (driverContext.getCtx().getExplainAnalyze() == AnalyzeState.RUNNING) {
-      return 0;
-    }
-    LOG.info("Executing stats task");
-    // Make sure that it is either an ANALYZE, INSERT OVERWRITE (maybe load) or CTAS command
-    short workComponentsPresent = 0;
-    if (work.getLoadTableDesc() != null) {
-      workComponentsPresent++;
-    }
-    if (work.getTableSpecs() != null) {
-      workComponentsPresent++;
+  public void initialize(QueryState queryState, QueryPlan queryPlan, DriverContext ctx,
+      CompilationOpContext opContext) {
+    super.initialize(queryState, queryPlan, ctx, opContext);
+
+    if (work.getBasicStatsWork() != null) {
+      BasicStatsTask task = new BasicStatsTask(conf, work.getBasicStatsWork());
+      task.followedColStats = work.hasColStats();
+      processors.add(0, task);
+    } else if (work.isFooterScan()) {
+      BasicStatsNoJobTask t = new BasicStatsNoJobTask(conf, work.getBasicStatsNoJobWork());
+      processors.add(0, t);
     }
-    if (work.getLoadFileDesc() != null) {
-      workComponentsPresent++;
+    if (work.hasColStats()) {
+      processors.add(new ColStatsProcessor(work.getColStats(), conf));
     }
 
-    assert (workComponentsPresent == 1);
-
-    String tableName = "";
-    Hive hive = getHive();
-    try {
-      if (work.getLoadTableDesc() != null) {
-        tableName = work.getLoadTableDesc().getTable().getTableName();
-      } else if (work.getTableSpecs() != null){
-        tableName = work.getTableSpecs().tableName;
-      } else {
-        tableName = work.getLoadFileDesc().getDestinationCreateTable();
-      }
-
-      table = hive.getTable(tableName);
-
-    } catch (HiveException e) {
-      LOG.error("Cannot get table {}", tableName, e);
-      console.printError("Cannot get table " + tableName, e.toString());
+    for (IStatsProcessor p : processors) {
+      p.initialize(opContext);
     }
-
-    return aggregateStats(hive);
-
   }
 
-  @Override
-  public StageType getType() {
-    return StageType.STATS;
-  }
 
   @Override
-  public String getName() {
-    return "STATS";
-  }
-
-  private int aggregateStats(Hive db) {
-
-    StatsAggregator statsAggregator = null;
+  public int execute(DriverContext driverContext) {
+    if (driverContext.getCtx().getExplainAnalyze() == AnalyzeState.RUNNING) {
+      return 0;
+    }
+    if (work.isAggregating() && work.isFooterScan()) {
+      throw new RuntimeException("Cannot have both basic stats work and stats no job work!");
+    }
     int ret = 0;
-    StatsCollectionContext scc = null;
-    EnvironmentContext environmentContext = null;
     try {
-      // Stats setup:
-      final Warehouse wh = new Warehouse(conf);
-      if (!getWork().getNoStatsAggregator() && !getWork().isNoScanAnalyzeCommand()) {
-        try {
-          scc = getContext();
-          statsAggregator = createStatsAggregator(scc, conf);
-        } catch (HiveException e) {
-          if (HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_STATS_RELIABLE)) {
-            throw e;
-          }
-          console.printError(ErrorMsg.STATS_SKIPPING_BY_ERROR.getErrorCodedMsg(e.toString()));
-        }
-      }
-
-      List<Partition> partitions = getPartitionsList(db);
-      boolean atomic = HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_STATS_ATOMIC);
-
-      String tableFullName = table.getFullyQualifiedName();
-
-      if (partitions == null) {
-        org.apache.hadoop.hive.metastore.api.Table tTable = table.getTTable();
-        Map<String, String> parameters = tTable.getParameters();
-        // In the following scenarios, we need to reset the stats to true.
-        // work.getTableSpecs() != null means analyze command
-        // work.getLoadTableDesc().getReplace() is true means insert overwrite command 
-        // work.getLoadFileDesc().getDestinationCreateTable().isEmpty() means CTAS etc.
-        // acidTable will not have accurate stats unless it is set through analyze command.
-        if (work.getTableSpecs() == null && AcidUtils.isFullAcidTable(table)) {
-          StatsSetupConst.setBasicStatsState(parameters, StatsSetupConst.FALSE);
-        } else if (work.getTableSpecs() != null
-            || (work.getLoadTableDesc() != null
-                && (work.getLoadTableDesc().getLoadFileType() == LoadFileType.REPLACE_ALL))
-            || (work.getLoadFileDesc() != null && !work.getLoadFileDesc()
-                .getDestinationCreateTable().isEmpty())) {
-          StatsSetupConst.setBasicStatsState(parameters, StatsSetupConst.TRUE);
-        }
-        // non-partitioned tables:
-        if (!existStats(parameters) && atomic) {
-          return 0;
-        }
-
-        // The collectable stats for the aggregator needs to be cleared.
-        // For eg. if a file is being loaded, the old number of rows are not valid
-        if (work.isClearAggregatorStats()) {
-          // we choose to keep the invalid stats and only change the setting.
-          StatsSetupConst.setBasicStatsState(parameters, StatsSetupConst.FALSE);
-        }
-
-        updateQuickStats(wh, parameters, tTable.getSd());
-        if (StatsSetupConst.areBasicStatsUptoDate(parameters)) {
-          if (statsAggregator != null) {
-            String prefix = getAggregationPrefix(table, null);
-            updateStats(statsAggregator, parameters, prefix, atomic);
-          }
-          // write table stats to metastore
-          if (!getWork().getNoStatsAggregator()) {
-            environmentContext = new EnvironmentContext();
-            environmentContext.putToProperties(StatsSetupConst.STATS_GENERATED,
-                StatsSetupConst.TASK);
-          }
-        }
-
-        getHive().alterTable(table, environmentContext);
-        if (conf.getBoolVar(ConfVars.TEZ_EXEC_SUMMARY)) {
-          console.printInfo("Table " + tableFullName + " stats: [" + toString(parameters) + ']');
-        }
-        LOG.info("Table {} stats: [{}]", tableFullName, toString(parameters));
-        if (Utilities.FILE_OP_LOGGER.isTraceEnabled()) {
-          Utilities.FILE_OP_LOGGER.trace(
-              "Table " + tableFullName + " stats: [" + toString(parameters) + ']');
-        }
-      } else {
-        // Partitioned table:
-        // Need to get the old stats of the partition
-        // and update the table stats based on the old and new stats.
-        List<Partition> updates = new ArrayList<Partition>();
-
-        //Get the file status up-front for all partitions. Beneficial in cases of blob storage systems
-        final Map<String, FileStatus[]> fileStatusMap = new ConcurrentHashMap<String, FileStatus[]>();
-        int poolSize = conf.getInt(ConfVars.HIVE_MOVE_FILES_THREAD_COUNT.varname, 1);
-        // In case thread count is set to 0, use single thread.
-        poolSize = Math.max(poolSize, 1);
-        final ExecutorService pool = Executors.newFixedThreadPool(poolSize,
-          new ThreadFactoryBuilder().setDaemon(true)
-            .setNameFormat("stats-updater-thread-%d")
-            .build());
-        final List<Future<Void>> futures = Lists.newLinkedList();
-        LOG.debug("Getting file stats of all partitions. threadpool size: {}", poolSize);
-        try {
-          for(final Partition partn : partitions) {
-            final String partitionName = partn.getName();
-            final org.apache.hadoop.hive.metastore.api.Partition tPart = partn.getTPartition();
-            Map<String, String> parameters = tPart.getParameters();
-
-            if (!existStats(parameters) && atomic) {
-              continue;
-            }
-            futures.add(pool.submit(new Callable<Void>() {
-              @Override
-              public Void call() throws Exception {
-                FileStatus[] partfileStatus = wh.getFileStatusesForSD(tPart.getSd());
-                fileStatusMap.put(partitionName,  partfileStatus);
-                return null;
-              }
-            }));
-          }
-          pool.shutdown();
-          for(Future<Void> future : futures) {
-            future.get();
-          }
-        } catch (InterruptedException e) {
-          LOG.debug("Cancelling {} file stats lookup tasks", futures.size());
-          //cancel other futures
-          for (Future future : futures) {
-            future.cancel(true);
-          }
-          // Fail the query if the stats are supposed to be reliable
-          if (work.isStatsReliable()) {
-            ret = 1;
-          }
-        } finally {
-          if (pool != null) {
-            pool.shutdownNow();
-          }
-          LOG.debug("Finished getting file stats of all partitions");
-        }
 
-        for (Partition partn : partitions) {
-          //
-          // get the old partition stats
-          //
-          org.apache.hadoop.hive.metastore.api.Partition tPart = partn.getTPartition();
-          Map<String, String> parameters = tPart.getParameters();
-          if (work.getTableSpecs() == null && AcidUtils.isFullAcidTable(table)) {
-            StatsSetupConst.setBasicStatsState(parameters, StatsSetupConst.FALSE);
-          } else if (work.getTableSpecs() != null
-              || (work.getLoadTableDesc() != null
-                  && (work.getLoadTableDesc().getLoadFileType() == LoadFileType.REPLACE_ALL))
-              || (work.getLoadFileDesc() != null && !work.getLoadFileDesc()
-                  .getDestinationCreateTable().isEmpty())) {
-            StatsSetupConst.setBasicStatsState(parameters, StatsSetupConst.TRUE);
-          }
-          //only when the stats exist, it is added to fileStatusMap
-          if (!fileStatusMap.containsKey(partn.getName())) {
-            continue;
-          }
-
-          // The collectable stats for the aggregator needs to be cleared.
-          // For eg. if a file is being loaded, the old number of rows are not valid
-          if (work.isClearAggregatorStats()) {
-            // we choose to keep the invalid stats and only change the setting.
-            StatsSetupConst.setBasicStatsState(parameters, StatsSetupConst.FALSE);
-          }
+      if (work.isFooterScan()) {
+        work.getBasicStatsNoJobWork().setPartitions(work.getPartitions());
+      }
 
-          updateQuickStats(parameters, fileStatusMap.get(partn.getName()));
-          if (StatsSetupConst.areBasicStatsUptoDate(parameters)) {
-            if (statsAggregator != null) {
-              String prefix = getAggregationPrefix(table, partn);
-              updateStats(statsAggregator, parameters, prefix, atomic);
-            }
-            if (!getWork().getNoStatsAggregator()) {
-              environmentContext = new EnvironmentContext();
-              environmentContext.putToProperties(StatsSetupConst.STATS_GENERATED,
-                  StatsSetupConst.TASK);
-            }
-          }
-          updates.add(new Partition(table, tPart));
+      Hive db = getHive();
+      Table tbl = getTable(db);
 
-          if (conf.getBoolVar(ConfVars.TEZ_EXEC_SUMMARY)) {
-            console.printInfo("Partition " + tableFullName + partn.getSpec() +
-            " stats: [" + toString(parameters) + ']');
-          }
-          LOG.info("Partition {}{} stats: [{}]", tableFullName, partn.getSpec(),
-            toString(parameters));
-        }
-        if (!updates.isEmpty()) {
-          db.alterPartitions(tableFullName, updates, environmentContext);
+      for (IStatsProcessor task : processors) {
+        task.setDpPartSpecs(dpPartSpecs);
+        ret = task.process(db, tbl);
+        if (ret != 0) {
+          return ret;
         }
       }
-
     } catch (Exception e) {
-      console.printInfo("[Warning] could not update stats.",
-          "Failed with exception " + e.getMessage() + "\n"
-              + StringUtils.stringifyException(e));
-
-      // Fail the query if the stats are supposed to be reliable
-      if (work.isStatsReliable()) {
-        ret = 1;
-      }
-    } finally {
-      if (statsAggregator != null) {
-        statsAggregator.closeConnection(scc);
-      }
+      LOG.error("Failed to run stats task", e);
+      return 1;
     }
-    // The return value of 0 indicates success,
-    // anything else indicates failure
-    return ret;
+    return 0;
   }
 
-  private String getAggregationPrefix(Table table, Partition partition)
-      throws MetaException {
 
-    // prefix is of the form dbName.tblName
-    String prefix = StatsUtils.getFullyQualifiedTableName(table.getDbName(),
-        org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.encodeTableName(table.getTableName()));
-    if (partition != null) {
-      return Utilities.join(prefix, Warehouse.makePartPath(partition.getSpec()));
+  private Table getTable(Hive db) throws SemanticException, HiveException {
+    Table tbl = work.getTable();
+    // FIXME for ctas this is still needed because location is not set sometimes
+    if (tbl.getSd().getLocation() == null) {
+      tbl = db.getTable(work.getFullTableName());
     }
-    return prefix;
+    return tbl;
   }
 
-  private StatsAggregator createStatsAggregator(StatsCollectionContext scc, HiveConf conf) throws HiveException {
-    String statsImpl = HiveConf.getVar(conf, HiveConf.ConfVars.HIVESTATSDBCLASS);
-    StatsFactory factory = StatsFactory.newFactory(statsImpl, conf);
-    if (factory == null) {
-      throw new HiveException(ErrorMsg.STATSPUBLISHER_NOT_OBTAINED.getErrorCodedMsg());
-    }
-    // initialize stats publishing table for noscan which has only stats task
-    // the rest of MR task following stats task initializes it in ExecDriver.java
-    StatsPublisher statsPublisher = factory.getStatsPublisher();
-    if (!statsPublisher.init(scc)) { // creating stats table if not exists
-      throw new HiveException(ErrorMsg.STATSPUBLISHER_INITIALIZATION_ERROR.getErrorCodedMsg());
-    }
-
-    // manufacture a StatsAggregator
-    StatsAggregator statsAggregator = factory.getStatsAggregator();
-    if (!statsAggregator.connect(scc)) {
-      throw new HiveException(ErrorMsg.STATSAGGREGATOR_CONNECTION_ERROR.getErrorCodedMsg(statsImpl));
-    }
-    return statsAggregator;
-  }
-
-  private StatsCollectionContext getContext() throws HiveException {
-
-    StatsCollectionContext scc = new StatsCollectionContext(conf);
-    Task sourceTask = getWork().getSourceTask();
-    if (sourceTask == null) {
-      throw new HiveException(ErrorMsg.STATSAGGREGATOR_SOURCETASK_NULL.getErrorCodedMsg());
-    }
-    scc.setTask(sourceTask);
-    scc.setStatsTmpDir(this.getWork().getStatsTmpDir());
-    return scc;
-  }
-
-  private boolean existStats(Map<String, String> parameters) {
-    return parameters.containsKey(StatsSetupConst.ROW_COUNT)
-        || parameters.containsKey(StatsSetupConst.NUM_FILES)
-        || parameters.containsKey(StatsSetupConst.TOTAL_SIZE)
-        || parameters.containsKey(StatsSetupConst.RAW_DATA_SIZE)
-        || parameters.containsKey(StatsSetupConst.NUM_PARTITIONS);
-  }
-
-  private void updateStats(StatsAggregator statsAggregator,
-      Map<String, String> parameters, String prefix, boolean atomic)
-      throws HiveException {
-
-    String aggKey = prefix.endsWith(Path.SEPARATOR) ? prefix : prefix + Path.SEPARATOR;
-
-    for (String statType : StatsSetupConst.statsRequireCompute) {
-      String value = statsAggregator.aggregateStats(aggKey, statType);
-      if (value != null && !value.isEmpty()) {
-        long longValue = Long.parseLong(value);
-
-        if (work.getLoadTableDesc() != null &&
-                (work.getLoadTableDesc().getLoadFileType() != LoadFileType.REPLACE_ALL)) {
-          String originalValue = parameters.get(statType);
-          if (originalValue != null) {
-            longValue += Long.parseLong(originalValue); // todo: invalid + valid = invalid
-          }
-        }
-        parameters.put(statType, String.valueOf(longValue));
-      } else {
-        if (atomic) {
-          throw new HiveException(ErrorMsg.STATSAGGREGATOR_MISSED_SOMESTATS, statType);
-        }
-      }
-    }
+  @Override
+  public StageType getType() {
+    return StageType.STATS;
   }
 
-  private void updateQuickStats(Warehouse wh, Map<String, String> parameters,
-      StorageDescriptor desc) throws MetaException {
-    /**
-     * calculate fast statistics
-     */
-    FileStatus[] partfileStatus = wh.getFileStatusesForSD(desc);
-    updateQuickStats(parameters, partfileStatus);
+  @Override
+  public String getName() {
+    return "STATS TASK";
   }
 
-  private void updateQuickStats(Map<String, String> parameters,
-      FileStatus[] partfileStatus) throws MetaException {
-    MetaStoreUtils.populateQuickStats(partfileStatus, parameters);
-  }
+  private Collection<Partition> dpPartSpecs;
 
-  private String toString(Map<String, String> parameters) {
-    StringBuilder builder = new StringBuilder();
-    for (String statType : StatsSetupConst.supportedStats) {
-      String value = parameters.get(statType);
-      if (value != null) {
-        if (builder.length() > 0) {
-          builder.append(", ");
-        }
-        builder.append(statType).append('=').append(value);
-      }
+  @Override
+  protected void receiveFeed(FeedType feedType, Object feedValue) {
+    // this method should be called by MoveTask when there are dynamic
+    // partitions generated
+    if (feedType == FeedType.DYNAMIC_PARTITIONS) {
+      dpPartSpecs = (Collection<Partition>) feedValue;
     }
-    return builder.toString();
   }
 
-  /**
-   * Get the list of partitions that need to update statistics.
-   * TODO: we should reuse the Partitions generated at compile time
-   * since getting the list of partitions is quite expensive.
-   *
-   * @return a list of partitions that need to update statistics.
-   * @throws HiveException
-   */
-  private List<Partition> getPartitionsList(Hive db) throws HiveException {
-    if (work.getLoadFileDesc() != null) {
-      return null; //we are in CTAS, so we know there are no partitions
-    }
-
-    List<Partition> list = new ArrayList<Partition>();
+  public static ExecutorService newThreadPool(HiveConf conf) {
+    int numThreads = HiveConf.getIntVar(conf, ConfVars.HIVE_STATS_GATHER_NUM_THREADS);
 
-    if (work.getTableSpecs() != null) {
-
-      // ANALYZE command
-      TableSpec tblSpec = work.getTableSpecs();
-      table = tblSpec.tableHandle;
-      if (!table.isPartitioned()) {
-        return null;
-      }
-      // get all partitions that matches with the partition spec
-      List<Partition> partitions = tblSpec.partitions;
-      if (partitions != null) {
-        for (Partition partn : partitions) {
-          list.add(partn);
-        }
-      }
-    } else if (work.getLoadTableDesc() != null) {
-
-      // INSERT OVERWRITE command
-      LoadTableDesc tbd = work.getLoadTableDesc();
-      table = db.getTable(tbd.getTable().getTableName());
-      if (!table.isPartitioned()) {
-        return null;
-      }
-      DynamicPartitionCtx dpCtx = tbd.getDPCtx();
-      if (dpCtx != null && dpCtx.getNumDPCols() > 0) { // dynamic partitions
-        // If no dynamic partitions are generated, dpPartSpecs may not be initialized
-        if (dpPartSpecs != null) {
-          // load the list of DP partitions and return the list of partition specs
-          list.addAll(dpPartSpecs);
-        }
-      } else { // static partition
-        Partition partn = db.getPartition(table, tbd.getPartitionSpec(), false);
-        list.add(partn);
-      }
-    }
-    return list;
+    ExecutorService executor = Executors.newFixedThreadPool(numThreads, new ThreadFactoryBuilder().setDaemon(true).setNameFormat("StatsNoJobTask-Thread-%d").build());
+    LOG.info("Initialized threadpool for stats computation with {} threads", numThreads);
+    return executor;
   }
 }
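
Read together, the hunks above reduce the rewritten StatsTask to a driver: initialize() registers processors, execute() runs them in order and stops at the first failure. A condensed restatement (names from this diff; imports as in StatsTask.java above; plumbing such as setDpPartSpecs and the explain-analyze short circuit elided), not a drop-in replacement:

static int runStats(StatsWork work, HiveConf conf, CompilationOpContext opCtx,
    Hive db, Table tbl) throws Exception {
  List<IStatsProcessor> processors = new ArrayList<>();
  if (work.getBasicStatsWork() != null) {
    BasicStatsTask basic = new BasicStatsTask(conf, work.getBasicStatsWork());
    basic.followedColStats = work.hasColStats();  // column stats run next
    processors.add(basic);                        // basic stats always go first
  } else if (work.isFooterScan()) {
    processors.add(new BasicStatsNoJobTask(conf, work.getBasicStatsNoJobWork()));
  }
  if (work.hasColStats()) {
    processors.add(new ColStatsProcessor(work.getColStats(), conf));
  }
  for (IStatsProcessor p : processors) {
    p.initialize(opCtx);
    int ret = p.process(db, tbl);  // 0 means success
    if (ret != 0) {
      return ret;                  // first failure aborts the chain
    }
  }
  return 0;
}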

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/java/org/apache/hadoop/hive/ql/exec/Task.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/Task.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/Task.java
index ab495cf..75603ab 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/Task.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/Task.java
@@ -349,6 +349,7 @@ public abstract class Task<T extends Serializable> implements Serializable, Node
     final List<Task<? extends Serializable>> leafTasks = new ArrayList<Task<?>>();
 
     NodeUtils.iterateTask(rootTasks, Task.class, new NodeUtils.Function<Task>() {
+      @Override
       public void apply(Task task) {
         List dependents = task.getDependentTasks();
         if (dependents == null || dependents.isEmpty()) {
@@ -648,4 +649,5 @@ public abstract class Task<T extends Serializable> implements Serializable, Node
   public boolean canExecuteInParallel(){
     return true;
   }
+
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/java/org/apache/hadoop/hive/ql/exec/TaskFactory.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/TaskFactory.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/TaskFactory.java
index 36a5eff..e22dc25 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/TaskFactory.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/TaskFactory.java
@@ -38,7 +38,7 @@ import org.apache.hadoop.hive.ql.index.IndexMetadataChangeWork;
 import org.apache.hadoop.hive.ql.io.merge.MergeFileTask;
 import org.apache.hadoop.hive.ql.io.merge.MergeFileWork;
 import org.apache.hadoop.hive.ql.plan.ColumnStatsUpdateWork;
-import org.apache.hadoop.hive.ql.plan.ColumnStatsWork;
+import org.apache.hadoop.hive.ql.plan.StatsWork;
 import org.apache.hadoop.hive.ql.plan.ConditionalWork;
 import org.apache.hadoop.hive.ql.plan.CopyWork;
 import org.apache.hadoop.hive.ql.plan.DDLWork;
@@ -53,8 +53,6 @@ import org.apache.hadoop.hive.ql.plan.MapredWork;
 import org.apache.hadoop.hive.ql.plan.MoveWork;
 import org.apache.hadoop.hive.ql.plan.ReplCopyWork;
 import org.apache.hadoop.hive.ql.plan.SparkWork;
-import org.apache.hadoop.hive.ql.plan.StatsNoJobWork;
-import org.apache.hadoop.hive.ql.plan.StatsWork;
 import org.apache.hadoop.hive.ql.plan.TezWork;
 
 /**
@@ -98,10 +96,7 @@ public final class TaskFactory {
 
     taskvec.add(new TaskTuple<MapredLocalWork>(MapredLocalWork.class,
         MapredLocalTask.class));
-    taskvec.add(new TaskTuple<StatsWork>(StatsWork.class,
-        StatsTask.class));
-    taskvec.add(new TaskTuple<StatsNoJobWork>(StatsNoJobWork.class, StatsNoJobTask.class));
-    taskvec.add(new TaskTuple<ColumnStatsWork>(ColumnStatsWork.class, ColumnStatsTask.class));
+    taskvec.add(new TaskTuple<StatsWork>(StatsWork.class, StatsTask.class));
     taskvec.add(new TaskTuple<ColumnStatsUpdateWork>(ColumnStatsUpdateWork.class, ColumnStatsUpdateTask.class));
     taskvec.add(new TaskTuple<MergeFileWork>(MergeFileWork.class,
         MergeFileTask.class));

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java
index 71fa42c..00590e3 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java
@@ -153,6 +153,7 @@ import org.apache.hadoop.hive.ql.parse.SemanticException;
 import org.apache.hadoop.hive.ql.plan.BaseWork;
 import org.apache.hadoop.hive.ql.plan.DynamicPartitionCtx;
 import org.apache.hadoop.hive.ql.plan.FileSinkDesc;
+import org.apache.hadoop.hive.ql.plan.IStatsGatherDesc;
 import org.apache.hadoop.hive.ql.plan.MapWork;
 import org.apache.hadoop.hive.ql.plan.MapredWork;
 import org.apache.hadoop.hive.ql.plan.MergeJoinWork;
@@ -161,7 +162,6 @@ import org.apache.hadoop.hive.ql.plan.PartitionDesc;
 import org.apache.hadoop.hive.ql.plan.PlanUtils;
 import org.apache.hadoop.hive.ql.plan.ReduceWork;
 import org.apache.hadoop.hive.ql.plan.TableDesc;
-import org.apache.hadoop.hive.ql.plan.TableScanDesc;
 import org.apache.hadoop.hive.ql.plan.api.Adjacency;
 import org.apache.hadoop.hive.ql.plan.api.Graph;
 import org.apache.hadoop.hive.ql.session.SessionState;
@@ -1620,12 +1620,14 @@ public final class Utilities {
     return removeTempOrDuplicateFiles(
         fs, fileStats, null, dpLevels, numBuckets, hconf, null, 0, false, filesKept);
   }
-  
+
   private static boolean removeEmptyDpDirectory(FileSystem fs, Path path) throws IOException {
     FileStatus[] items = fs.listStatus(path);
     // remove empty directory since DP insert should not generate empty partitions.
     // empty directories could be generated by crashed Task/ScriptOperator
-    if (items.length != 0) return false;
+    if (items.length != 0) {
+      return false;
+    }
     if (!fs.delete(path, true)) {
       LOG.error("Cannot delete empty directory {}", path);
       throw new IOException("Cannot delete empty directory " + path);
@@ -3607,7 +3609,9 @@ public final class Utilities {
 
       if (op instanceof FileSinkOperator) {
         FileSinkDesc fdesc = ((FileSinkOperator) op).getConf();
-        if (fdesc.isMmTable()) continue; // No need to create for MM tables
+        if (fdesc.isMmTable()) {
+          continue; // No need to create for MM tables
+        }
         Path tempDir = fdesc.getDirName();
         if (tempDir != null) {
           Path tempPath = Utilities.toTempPath(tempDir);
@@ -3923,10 +3927,8 @@ public final class Utilities {
     for (Operator<? extends OperatorDesc> op : ops) {
       OperatorDesc desc = op.getConf();
       String statsTmpDir = null;
-      if (desc instanceof FileSinkDesc) {
-         statsTmpDir = ((FileSinkDesc)desc).getStatsTmpDir();
-      } else if (desc instanceof TableScanDesc) {
-        statsTmpDir = ((TableScanDesc) desc).getTmpStatsDir();
+      if (desc instanceof IStatsGatherDesc) {
+        statsTmpDir = ((IStatsGatherDesc) desc).getTmpStatsDir();
       }
       if (statsTmpDir != null && !statsTmpDir.isEmpty()) {
         statsTmpDirs.add(statsTmpDir);
@@ -4078,7 +4080,9 @@ public final class Utilities {
   }
 
   private static Path[] statusToPath(FileStatus[] statuses) {
-    if (statuses == null) return null;
+    if (statuses == null) {
+      return null;
+    }
     Path[] paths = new Path[statuses.length];
     for (int i = 0; i < statuses.length; ++i) {
       paths[i] = statuses[i].getPath();
@@ -4108,7 +4112,9 @@ public final class Utilities {
       Utilities.FILE_OP_LOGGER.trace("Looking at {} from {}", subDir, lfsPath);
 
       // If sorted, we'll skip a bunch of files.
-      if (lastRelDir != null && subDir.startsWith(lastRelDir)) continue;
+      if (lastRelDir != null && subDir.startsWith(lastRelDir)) {
+        continue;
+      }
       int startIx = skipLevels > 0 ? -1 : 0;
       for (int i = 0; i < skipLevels; ++i) {
         startIx = subDir.indexOf(Path.SEPARATOR_CHAR, startIx + 1);
@@ -4118,7 +4124,9 @@ public final class Utilities {
           break;
         }
       }
-      if (startIx == -1) continue;
+      if (startIx == -1) {
+        continue;
+      }
       int endIx = subDir.indexOf(Path.SEPARATOR_CHAR, startIx + 1);
       if (endIx == -1) {
         Utilities.FILE_OP_LOGGER.info("Expected level of nesting ({}) is not present in"
@@ -4127,7 +4135,9 @@ public final class Utilities {
       }
       lastRelDir = subDir = subDir.substring(0, endIx);
       Path candidate = new Path(relRoot, subDir);
-      if (!filter.accept(candidate)) continue;
+      if (!filter.accept(candidate)) {
+        continue;
+      }
       results.add(fs.makeQualified(candidate));
     }
     return results.toArray(new Path[results.size()]);
@@ -4168,7 +4178,7 @@ public final class Utilities {
 
   public static void writeMmCommitManifest(List<Path> commitPaths, Path specPath, FileSystem fs,
       String taskId, Long txnId, int stmtId, String unionSuffix) throws HiveException {
-    if (CollectionUtils.isEmpty(commitPaths)) {
+    if (commitPaths.isEmpty()) {
       return;
     }
     // We assume one FSOP per task (per specPath), so we create it in specPath.
@@ -4288,11 +4298,15 @@ public final class Utilities {
       throw new HiveException("The following files were committed but not found: " + committed);
     }
 
-    if (mmDirectories.isEmpty()) return;
+    if (mmDirectories.isEmpty()) {
+      return;
+    }
 
     // TODO: see HIVE-14886 - removeTempOrDuplicateFiles is broken for list bucketing,
     //       so maintain parity here by not calling it at all.
-    if (lbLevels != 0) return;
+    if (lbLevels != 0) {
+      return;
+    }
     // Create fake file statuses to avoid querying the file system. removeTempOrDuplicateFiles
     // doesn't need to check anything except path and directory status for MM directories.
     FileStatus[] finalResults = new FileStatus[mmDirectories.size()];
@@ -4320,7 +4334,9 @@ public final class Utilities {
     for (FileStatus child : fs.listStatus(dir)) {
       Path childPath = child.getPath();
       if (unionSuffix == null) {
-        if (committed.remove(childPath.toString())) continue; // A good file.
+        if (committed.remove(childPath.toString())) {
+          continue; // A good file.
+        }
         deleteUncommitedFile(childPath, fs);
       } else if (!child.isDirectory()) {
         if (committed.contains(childPath.toString())) {
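
The statsTmpDirs hunk above leans on a small interface this patch introduces: FileSinkDesc used to expose the stats temp dir as getStatsTmpDir() and TableScanDesc as getTmpStatsDir(), forcing callers to branch on the concrete type. Only the call site is visible in this excerpt, so the following shape is an inference, not the actual file:

public interface IStatsGatherDesc {
  String getTmpStatsDir();
}

// With FileSinkDesc and TableScanDesc both implementing it,
// the gathering loop shrinks to a single instanceof check:
if (desc instanceof IStatsGatherDesc) {
  statsTmpDir = ((IStatsGatherDesc) desc).getTmpStatsDir();
}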

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
index cf4df9b..cceea01 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
@@ -1794,7 +1794,9 @@ public class Hive {
       }
 
       // column stats will be inaccurate
-      StatsSetupConst.clearColumnStatsState(newTPart.getParameters());
+      if (!hasFollowingStatsTask) {
+        StatsSetupConst.clearColumnStatsState(newTPart.getParameters());
+      }
 
       // recreate the partition if it existed before
       if (isSkewedStoreAsSubdir) {
@@ -1813,8 +1815,8 @@ public class Hive {
       if (oldPart == null) {
         newTPart.getTPartition().setParameters(new HashMap<String,String>());
         if (this.getConf().getBoolVar(HiveConf.ConfVars.HIVESTATSAUTOGATHER)) {
-          StatsSetupConst.setStatsStateForCreateTable(newTPart.getParameters(), null,
-              StatsSetupConst.TRUE);
+          StatsSetupConst.setStatsStateForCreateTable(newTPart.getParameters(),
+              MetaStoreUtils.getColumnNames(tbl.getCols()), StatsSetupConst.TRUE);
         }
         MetaStoreUtils.populateQuickStats(HiveStatsUtils.getFileStatusRecurse(newPartPath, -1, newPartPath.getFileSystem(conf)), newTPart.getParameters());
         try {
@@ -2299,7 +2301,9 @@ private void constructOneLBLocationMap(FileStatus fSta,
     }
 
     //column stats will be inaccurate
-    StatsSetupConst.clearColumnStatsState(tbl.getParameters());
+    if (!hasFollowingStatsTask) {
+      StatsSetupConst.clearColumnStatsState(tbl.getParameters());
+    }
 
     try {
       if (isSkewedStoreAsSubdir) {
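
Both Hive.java hunks encode one rule: a load makes existing column stats stale, but when a StatsTask follows in the same plan it will recompute them anyway, so clearing is skipped to save a pointless metastore write. Reduced to its essence (the flag name is taken from the diff):

// Loads invalidate column stats; defer to the scheduled StatsTask
// when there is one, otherwise mark the stats stale immediately.
if (!hasFollowingStatsTask) {
  StatsSetupConst.clearColumnStatsState(tbl.getParameters());
}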

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/java/org/apache/hadoop/hive/ql/metadata/SessionHiveMetaStoreClient.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/SessionHiveMetaStoreClient.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/SessionHiveMetaStoreClient.java
index 78e83af..1c26200 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/SessionHiveMetaStoreClient.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/SessionHiveMetaStoreClient.java
@@ -60,6 +60,7 @@ import org.apache.hadoop.hive.metastore.api.SetPartitionsStatsRequest;
 import org.apache.hadoop.hive.metastore.api.TableMeta;
 import org.apache.hadoop.hive.metastore.api.UnknownDBException;
 import org.apache.hadoop.hive.metastore.api.UnknownTableException;
+import org.apache.hadoop.hive.ql.parse.SemanticAnalyzer;
 import org.apache.hadoop.hive.ql.session.SessionState;
 import org.apache.hadoop.hive.shims.HadoopShims;
 import org.apache.hadoop.hive.shims.ShimLoader;
@@ -196,7 +197,7 @@ public class SessionHiveMetaStoreClient extends HiveMetaStoreClient implements I
     Collections.sort(tableNames);
     return tableNames;
   }
-  
+
   @Override
   public List<TableMeta> getTableMeta(String dbPatterns, String tablePatterns, List<String> tableTypes)
       throws MetaException {
@@ -235,7 +236,7 @@ public class SessionHiveMetaStoreClient extends HiveMetaStoreClient implements I
     }
     return tableMetas;
   }
-  
+
   private boolean matchesAny(String string, List<Matcher> matchers) {
     for (Matcher matcher : matchers) {
       if (matcher.reset(string).matches()) {
@@ -399,6 +400,8 @@ public class SessionHiveMetaStoreClient extends HiveMetaStoreClient implements I
       EnvironmentContext envContext) throws AlreadyExistsException, InvalidObjectException,
       MetaException, NoSuchObjectException, TException {
 
+    boolean isVirtualTable = tbl.getTableName().startsWith(SemanticAnalyzer.VALUES_TMP_TABLE_NAME_PREFIX);
+
     SessionState ss = SessionState.get();
     if (ss == null) {
       throw new MetaException("No current SessionState, cannot create temporary table"
@@ -434,6 +437,10 @@ public class SessionHiveMetaStoreClient extends HiveMetaStoreClient implements I
 
     // Add temp table info to current session
     Table tTable = new Table(tbl);
+    if (!isVirtualTable) {
+      StatsSetupConst.setStatsStateForCreateTable(tbl.getParameters(),
+          org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.getColumnNamesForTable(tbl), StatsSetupConst.TRUE);
+    }
     if (tables == null) {
       tables = new HashMap<String, Table>();
       ss.getTempTables().put(dbName, tables);
@@ -466,8 +473,6 @@ public class SessionHiveMetaStoreClient extends HiveMetaStoreClient implements I
     }
 
     org.apache.hadoop.hive.metastore.api.Table newtCopy = deepCopyAndLowerCaseTable(newt);
-    MetaStoreUtils.updateTableStatsFast(newtCopy,
-        getWh().getFileStatusesForSD(newtCopy.getSd()), false, true, envContext);
     Table newTable = new Table(newtCopy);
     String newDbName = newTable.getDbName();
     String newTableName = newTable.getTableName();
@@ -656,7 +661,7 @@ public class SessionHiveMetaStoreClient extends HiveMetaStoreClient implements I
   public static Map<String, Table> getTempTablesForDatabase(String dbName) {
     return getTempTables().get(dbName);
   }
-  
+
   public static Map<String, Map<String, Table>> getTempTables() {
     SessionState ss = SessionState.get();
     if (ss == null) {
@@ -712,6 +717,13 @@ public class SessionHiveMetaStoreClient extends HiveMetaStoreClient implements I
           ssTableColStats);
     }
     mergeColumnStats(ssTableColStats, colStats);
+
+    List<String> colNames = new ArrayList<>();
+    for (ColumnStatisticsObj obj : colStats.getStatsObj()) {
+      colNames.add(obj.getColName());
+    }
+    org.apache.hadoop.hive.metastore.api.Table table = getTempTable(dbName, tableName);
+    StatsSetupConst.setColumnStatsState(table.getParameters(), colNames);
     return true;
   }
 

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRTableScan1.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRTableScan1.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRTableScan1.java
index 768640c..4fb39fc 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRTableScan1.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRTableScan1.java
@@ -23,12 +23,10 @@ import java.util.Map;
 import java.util.Set;
 import java.util.Stack;
 
-import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.ql.exec.Operator;
 import org.apache.hadoop.hive.ql.exec.TableScanOperator;
 import org.apache.hadoop.hive.ql.exec.Task;
 import org.apache.hadoop.hive.ql.exec.TaskFactory;
-import org.apache.hadoop.hive.ql.exec.Utilities;
 import org.apache.hadoop.hive.ql.exec.mr.MapRedTask;
 import org.apache.hadoop.hive.ql.io.orc.OrcInputFormat;
 import org.apache.hadoop.hive.ql.io.parquet.MapredParquetInputFormat;
@@ -41,10 +39,10 @@ import org.apache.hadoop.hive.ql.optimizer.GenMRProcContext.GenMapRedCtx;
 import org.apache.hadoop.hive.ql.parse.ParseContext;
 import org.apache.hadoop.hive.ql.parse.PrunedPartitionList;
 import org.apache.hadoop.hive.ql.parse.SemanticException;
+import org.apache.hadoop.hive.ql.plan.StatsWork;
+import org.apache.hadoop.hive.ql.plan.BasicStatsWork;
 import org.apache.hadoop.hive.ql.plan.MapredWork;
 import org.apache.hadoop.hive.ql.plan.OperatorDesc;
-import org.apache.hadoop.hive.ql.plan.StatsNoJobWork;
-import org.apache.hadoop.hive.ql.plan.StatsWork;
 import org.apache.hadoop.mapred.InputFormat;
 
 /**
@@ -67,8 +65,8 @@ public class GenMRTableScan1 implements NodeProcessor {
     TableScanOperator op = (TableScanOperator) nd;
     GenMRProcContext ctx = (GenMRProcContext) opProcCtx;
     ParseContext parseCtx = ctx.getParseCtx();
-    Class<? extends InputFormat> inputFormat = op.getConf().getTableMetadata()
-        .getInputFormatClass();
+    Table table = op.getConf().getTableMetadata();
+    Class<? extends InputFormat> inputFormat = table.getInputFormatClass();
     Map<Operator<? extends OperatorDesc>, GenMapRedCtx> mapCurrCtx = ctx.getMapCurrCtx();
 
     // create a dummy MapReduce task
@@ -93,19 +91,17 @@ public class GenMRTableScan1 implements NodeProcessor {
             // ANALYZE TABLE T [PARTITION (...)] COMPUTE STATISTICS noscan;
 
             // There will not be any MR or Tez job above this task
-            StatsNoJobWork snjWork = new StatsNoJobWork(op.getConf().getTableMetadata().getTableSpec());
-            snjWork.setStatsReliable(parseCtx.getConf().getBoolVar(
-                HiveConf.ConfVars.HIVE_STATS_RELIABLE));
+            StatsWork statWork = new StatsWork(table, parseCtx.getConf());
+            statWork.setFooterScan();
+
             // If partition is specified, get pruned partition list
             Set<Partition> confirmedParts = GenMapRedUtils.getConfirmedPartitionsForScan(op);
             if (confirmedParts.size() > 0) {
-              Table source = op.getConf().getTableMetadata();
               List<String> partCols = GenMapRedUtils.getPartitionColumns(op);
-              PrunedPartitionList partList = new PrunedPartitionList(source, confirmedParts,
-                  partCols, false);
-              snjWork.setPrunedPartitionList(partList);
+              PrunedPartitionList partList = new PrunedPartitionList(table, confirmedParts, partCols, false);
+              statWork.addInputPartitions(partList.getPartitions());
             }
-            Task<StatsNoJobWork> snjTask = TaskFactory.get(snjWork, parseCtx.getConf());
+            Task<StatsWork> snjTask = TaskFactory.get(statWork, parseCtx.getConf());
             ctx.setCurrTask(snjTask);
             ctx.setCurrTopOp(null);
             ctx.getRootTasks().clear();
@@ -115,14 +111,15 @@ public class GenMRTableScan1 implements NodeProcessor {
             // The plan consists of a simple MapRedTask followed by a StatsTask.
             // The MR task is just a simple TableScanOperator
 
-            StatsWork statsWork = new StatsWork(op.getConf().getTableMetadata().getTableSpec());
-            statsWork.setAggKey(op.getConf().getStatsAggPrefix());
-            statsWork.setStatsTmpDir(op.getConf().getTmpStatsDir());
-            statsWork.setSourceTask(currTask);
-            statsWork.setStatsReliable(parseCtx.getConf().getBoolVar(
-                HiveConf.ConfVars.HIVE_STATS_RELIABLE));
-            Task<StatsWork> statsTask = TaskFactory.get(statsWork, parseCtx.getConf());
-            currTask.addDependentTask(statsTask);
+            BasicStatsWork statsWork = new BasicStatsWork(table.getTableSpec());
+
+            statsWork.setNoScanAnalyzeCommand(noScan);
+            StatsWork columnStatsWork = new StatsWork(table, statsWork, parseCtx.getConf());
+            columnStatsWork.collectStatsFromAggregator(op.getConf());
+
+            columnStatsWork.setSourceTask(currTask);
+            Task<StatsWork> columnStatsTask = TaskFactory.get(columnStatsWork, parseCtx.getConf());
+            currTask.addDependentTask(columnStatsTask);
             if (!ctx.getRootTasks().contains(currTask)) {
               ctx.getRootTasks().add(currTask);
             }
@@ -130,10 +127,9 @@ public class GenMRTableScan1 implements NodeProcessor {
             // ANALYZE TABLE T [PARTITION (...)] COMPUTE STATISTICS noscan;
             // The plan consists of a StatsTask only.
             if (noScan) {
-              statsTask.setParentTasks(null);
-              statsWork.setNoScanAnalyzeCommand(true);
+              columnStatsTask.setParentTasks(null);
               ctx.getRootTasks().remove(currTask);
-              ctx.getRootTasks().add(statsTask);
+              ctx.getRootTasks().add(columnStatsTask);
             }
 
             currWork.getMapWork().setGatheringStats(true);
@@ -147,9 +143,8 @@ public class GenMRTableScan1 implements NodeProcessor {
             Set<Partition> confirmedPartns = GenMapRedUtils
                 .getConfirmedPartitionsForScan(op);
             if (confirmedPartns.size() > 0) {
-              Table source = op.getConf().getTableMetadata();
               List<String> partCols = GenMapRedUtils.getPartitionColumns(op);
-              PrunedPartitionList partList = new PrunedPartitionList(source, confirmedPartns, partCols, false);
+              PrunedPartitionList partList = new PrunedPartitionList(table, confirmedPartns, partCols, false);
               GenMapRedUtils.setTaskPlan(currAliasId, op, currTask, false, ctx, partList);
             } else { // non-partitioned table
               GenMapRedUtils.setTaskPlan(currAliasId, op, currTask, false, ctx);
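
Assembled from the hunks above, the compile-time side of ANALYZE ... COMPUTE STATISTICS noscan now builds one footer-scan StatsWork where StatsNoJobWork used to be; in one place (this mirrors the diff and uses no API beyond what the hunks show):

StatsWork statWork = new StatsWork(table, parseCtx.getConf());
statWork.setFooterScan();  // basic stats come from file footers, no MR/Tez job
Set<Partition> confirmedParts = GenMapRedUtils.getConfirmedPartitionsForScan(op);
if (confirmedParts.size() > 0) {
  List<String> partCols = GenMapRedUtils.getPartitionColumns(op);
  PrunedPartitionList partList =
      new PrunedPartitionList(table, confirmedParts, partCols, false);
  statWork.addInputPartitions(partList.getPartitions());
}
Task<StatsWork> snjTask = TaskFactory.get(statWork, parseCtx.getConf());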


[03/22] hive git commit: HIVE-16827 : Merge stats task and column stats task into a single task (Zoltan Haindrich via Ashutosh Chauhan)

Posted by ha...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/stats8.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/stats8.q.out b/ql/src/test/results/clientpositive/spark/stats8.q.out
index b83adbd..8dedb62 100644
--- a/ql/src/test/results/clientpositive/spark/stats8.q.out
+++ b/ql/src/test/results/clientpositive/spark/stats8.q.out
@@ -53,7 +53,8 @@ STAGE PLANS:
                   Statistics: Num rows: 1 Data size: 23248 Basic stats: PARTIAL Column stats: NONE
 
   Stage: Stage-1
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: analyze table analyze_srcpart PARTITION(ds='2008-04-08',hr=11) compute statistics
 PREHOOK: type: QUERY
@@ -164,7 +165,8 @@ STAGE PLANS:
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: PARTIAL Column stats: NONE
 
   Stage: Stage-1
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: analyze table analyze_srcpart PARTITION(ds='2008-04-08',hr=12) compute statistics
 PREHOOK: type: QUERY
@@ -236,7 +238,8 @@ STAGE PLANS:
                   Statistics: Num rows: 1000 Data size: 10624 Basic stats: PARTIAL Column stats: NONE
 
   Stage: Stage-1
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: analyze table analyze_srcpart PARTITION(ds='2008-04-09',hr=11) compute statistics
 PREHOOK: type: QUERY
@@ -308,7 +311,8 @@ STAGE PLANS:
                   Statistics: Num rows: 1500 Data size: 15936 Basic stats: PARTIAL Column stats: NONE
 
   Stage: Stage-1
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: analyze table analyze_srcpart PARTITION(ds='2008-04-09',hr=12) compute statistics
 PREHOOK: type: QUERY
@@ -380,7 +384,8 @@ STAGE PLANS:
                   Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
 
   Stage: Stage-1
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: analyze table analyze_srcpart PARTITION(ds, hr) compute statistics
 PREHOOK: type: QUERY
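
Every "-    Stats-Aggr Operator" / "+    Stats Work" hunk in these golden files comes from the same rename: EXPLAIN derives stage labels from the @Explain annotations on the plan's work classes, and the merged StatsWork is labeled "Stats Work", with its embedded basic-stats part rendered as a nested "Basic Stats Work:" line. A hedged sketch of what such an annotated class looks like; the attribute placement and getter shown are assumptions, not text from the patch:

import java.io.Serializable;

import org.apache.hadoop.hive.ql.plan.BasicStatsWork;
import org.apache.hadoop.hive.ql.plan.Explain;

// Hypothetical reduction of the renamed work class; only the labels matter here.
@Explain(displayName = "Stats Work")
public class StatsWork implements Serializable {
  private BasicStatsWork basicStatsWork;

  // Rendered as the indented "Basic Stats Work:" line in the plans above.
  @Explain(displayName = "Basic Stats Work")
  public BasicStatsWork getBasicStatsWork() {
    return basicStatsWork;
  }
}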

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/stats9.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/stats9.q.out b/ql/src/test/results/clientpositive/spark/stats9.q.out
index c2ca23f..08590a5 100644
--- a/ql/src/test/results/clientpositive/spark/stats9.q.out
+++ b/ql/src/test/results/clientpositive/spark/stats9.q.out
@@ -36,7 +36,8 @@ STAGE PLANS:
                   Statistics: Num rows: 1 Data size: 11603 Basic stats: COMPLETE Column stats: COMPLETE
 
   Stage: Stage-1
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: analyze table analyze_srcbucket compute statistics
 PREHOOK: type: QUERY

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/stats_noscan_1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/stats_noscan_1.q.out b/ql/src/test/results/clientpositive/spark/stats_noscan_1.q.out
index 6d3f413..c8fa18c 100644
--- a/ql/src/test/results/clientpositive/spark/stats_noscan_1.q.out
+++ b/ql/src/test/results/clientpositive/spark/stats_noscan_1.q.out
@@ -44,7 +44,8 @@ STAGE DEPENDENCIES:
 
 STAGE PLANS:
   Stage: Stage-1
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: analyze table analyze_srcpart PARTITION(ds='2008-04-08',hr=11) compute statistics noscan
 PREHOOK: type: QUERY
@@ -305,7 +306,8 @@ STAGE DEPENDENCIES:
 
 STAGE PLANS:
   Stage: Stage-1
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: analyze table analyze_srcpart_partial PARTITION(ds='2008-04-08') compute statistics noscan
 PREHOOK: type: QUERY

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/stats_only_null.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/stats_only_null.q.out b/ql/src/test/results/clientpositive/spark/stats_only_null.q.out
index be906f6..758503f 100644
--- a/ql/src/test/results/clientpositive/spark/stats_only_null.q.out
+++ b/ql/src/test/results/clientpositive/spark/stats_only_null.q.out
@@ -181,30 +181,40 @@ STAGE PLANS:
 PREHOOK: query: analyze table stats_null compute statistics for columns a,b,c,d
 PREHOOK: type: QUERY
 PREHOOK: Input: default@stats_null
+PREHOOK: Output: default@stats_null
 #### A masked pattern was here ####
 POSTHOOK: query: analyze table stats_null compute statistics for columns a,b,c,d
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@stats_null
+POSTHOOK: Output: default@stats_null
 #### A masked pattern was here ####
 PREHOOK: query: analyze table stats_null_part partition(dt='2010') compute statistics for columns a,b,c,d
 PREHOOK: type: QUERY
 PREHOOK: Input: default@stats_null_part
 PREHOOK: Input: default@stats_null_part@dt=2010
+PREHOOK: Output: default@stats_null_part
+PREHOOK: Output: default@stats_null_part@dt=2010
 #### A masked pattern was here ####
 POSTHOOK: query: analyze table stats_null_part partition(dt='2010') compute statistics for columns a,b,c,d
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@stats_null_part
 POSTHOOK: Input: default@stats_null_part@dt=2010
+POSTHOOK: Output: default@stats_null_part
+POSTHOOK: Output: default@stats_null_part@dt=2010
 #### A masked pattern was here ####
 PREHOOK: query: analyze table stats_null_part partition(dt='2011') compute statistics for columns a,b,c,d
 PREHOOK: type: QUERY
 PREHOOK: Input: default@stats_null_part
 PREHOOK: Input: default@stats_null_part@dt=2011
+PREHOOK: Output: default@stats_null_part
+PREHOOK: Output: default@stats_null_part@dt=2011
 #### A masked pattern was here ####
 POSTHOOK: query: analyze table stats_null_part partition(dt='2011') compute statistics for columns a,b,c,d
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@stats_null_part
 POSTHOOK: Input: default@stats_null_part@dt=2011
+POSTHOOK: Output: default@stats_null_part
+POSTHOOK: Output: default@stats_null_part@dt=2011
 #### A masked pattern was here ####
 PREHOOK: query: describe formatted stats_null_part partition (dt='2010')
 PREHOOK: type: DESCTABLE
@@ -372,12 +382,18 @@ PREHOOK: type: QUERY
 PREHOOK: Input: default@stats_null_part
 PREHOOK: Input: default@stats_null_part@dt=1
 PREHOOK: Input: default@stats_null_part@dt=__HIVE_DEFAULT_PARTITION__
+PREHOOK: Output: default@stats_null_part
+PREHOOK: Output: default@stats_null_part@dt=1
+PREHOOK: Output: default@stats_null_part@dt=__HIVE_DEFAULT_PARTITION__
 #### A masked pattern was here ####
 POSTHOOK: query: analyze table stats_null_part compute statistics for columns
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@stats_null_part
 POSTHOOK: Input: default@stats_null_part@dt=1
 POSTHOOK: Input: default@stats_null_part@dt=__HIVE_DEFAULT_PARTITION__
+POSTHOOK: Output: default@stats_null_part
+POSTHOOK: Output: default@stats_null_part@dt=1
+POSTHOOK: Output: default@stats_null_part@dt=__HIVE_DEFAULT_PARTITION__
 #### A masked pattern was here ####
 PREHOOK: query: describe formatted stats_null_part partition(dt = 1) a
 PREHOOK: type: DESCTABLE
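
The added PREHOOK/POSTHOOK "Output:" lines for ANALYZE ... COMPUTE STATISTICS FOR COLUMNS across these files reflect that the merged task writes column statistics back through the metastore, so the analyzed table and its partitions are now registered as write entities of the query in addition to being inputs. A hedged sketch of that registration; the helper and the WriteType value are assumptions, not code from the patch:

import java.util.Set;

import org.apache.hadoop.hive.ql.hooks.WriteEntity;
import org.apache.hadoop.hive.ql.metadata.Partition;
import org.apache.hadoop.hive.ql.metadata.Table;

// Hypothetical helper: marking analyzed objects as outputs makes them visible
// to the pre/post execution hooks (and to locking and authorization).
static void registerAnalyzeOutputs(Set<WriteEntity> outputs, Table table,
    Iterable<Partition> partitions) {
  outputs.add(new WriteEntity(table, WriteEntity.WriteType.DDL_SHARED));
  for (Partition p : partitions) {
    outputs.add(new WriteEntity(p, WriteEntity.WriteType.DDL_SHARED));
  }
}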

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/stats_partscan_1_23.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/stats_partscan_1_23.q.out b/ql/src/test/results/clientpositive/spark/stats_partscan_1_23.q.out
new file mode 100644
index 0000000..403bc28
--- /dev/null
+++ b/ql/src/test/results/clientpositive/spark/stats_partscan_1_23.q.out
@@ -0,0 +1,188 @@
+PREHOOK: query: CREATE table analyze_srcpart_partial_scan (key STRING, value STRING)
+partitioned by (ds string, hr string)
+stored as rcfile
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@analyze_srcpart_partial_scan
+POSTHOOK: query: CREATE table analyze_srcpart_partial_scan (key STRING, value STRING)
+partitioned by (ds string, hr string)
+stored as rcfile
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@analyze_srcpart_partial_scan
+PREHOOK: query: insert overwrite table analyze_srcpart_partial_scan partition (ds, hr) select * from srcpart where ds is not null
+PREHOOK: type: QUERY
+PREHOOK: Input: default@srcpart
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
+PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
+PREHOOK: Output: default@analyze_srcpart_partial_scan
+POSTHOOK: query: insert overwrite table analyze_srcpart_partial_scan partition (ds, hr) select * from srcpart where ds is not null
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@srcpart
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
+POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
+POSTHOOK: Output: default@analyze_srcpart_partial_scan@ds=2008-04-08/hr=11
+POSTHOOK: Output: default@analyze_srcpart_partial_scan@ds=2008-04-08/hr=12
+POSTHOOK: Output: default@analyze_srcpart_partial_scan@ds=2008-04-09/hr=11
+POSTHOOK: Output: default@analyze_srcpart_partial_scan@ds=2008-04-09/hr=12
+POSTHOOK: Lineage: analyze_srcpart_partial_scan PARTITION(ds=2008-04-08,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: analyze_srcpart_partial_scan PARTITION(ds=2008-04-08,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: analyze_srcpart_partial_scan PARTITION(ds=2008-04-08,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: analyze_srcpart_partial_scan PARTITION(ds=2008-04-08,hr=12).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: analyze_srcpart_partial_scan PARTITION(ds=2008-04-09,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: analyze_srcpart_partial_scan PARTITION(ds=2008-04-09,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: analyze_srcpart_partial_scan PARTITION(ds=2008-04-09,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: analyze_srcpart_partial_scan PARTITION(ds=2008-04-09,hr=12).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: describe formatted analyze_srcpart_partial_scan PARTITION(ds='2008-04-08',hr=11)
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@analyze_srcpart_partial_scan
+POSTHOOK: query: describe formatted analyze_srcpart_partial_scan PARTITION(ds='2008-04-08',hr=11)
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@analyze_srcpart_partial_scan
+# col_name            	data_type           	comment             
+	 	 
+key                 	string              	                    
+value               	string              	                    
+	 	 
+# Partition Information	 	 
+# col_name            	data_type           	comment             
+	 	 
+ds                  	string              	                    
+hr                  	string              	                    
+	 	 
+# Detailed Partition Information	 	 
+Partition Value:    	[2008-04-08, 11]    	 
+Database:           	default             	 
+Table:              	analyze_srcpart_partial_scan	 
+#### A masked pattern was here ####
+Partition Parameters:	 	 
+	numFiles            	1                   
+	totalSize           	5293                
+#### A masked pattern was here ####
+	 	 
+# Storage Information	 	 
+SerDe Library:      	org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe	 
+InputFormat:        	org.apache.hadoop.hive.ql.io.RCFileInputFormat	 
+OutputFormat:       	org.apache.hadoop.hive.ql.io.RCFileOutputFormat	 
+Compressed:         	No                  	 
+Num Buckets:        	-1                  	 
+Bucket Columns:     	[]                  	 
+Sort Columns:       	[]                  	 
+Storage Desc Params:	 	 
+	serialization.format	1                   
+PREHOOK: query: explain
+analyze table analyze_srcpart_partial_scan PARTITION(ds='2008-04-08',hr=11) compute statistics partialscan
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+analyze table analyze_srcpart_partial_scan PARTITION(ds='2008-04-08',hr=11) compute statistics partialscan
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-2 is a root stage
+  Stage-1 depends on stages: Stage-0, Stage-2
+
+STAGE PLANS:
+  Stage: Stage-2
+    Partial Scan Statistics
+
+  Stage: Stage-1
+    Stats Work
+      Basic Stats Work:
+
+PREHOOK: query: analyze table analyze_srcpart_partial_scan PARTITION(ds='2008-04-08',hr=11) compute statistics partialscan
+PREHOOK: type: QUERY
+PREHOOK: Input: default@analyze_srcpart_partial_scan
+PREHOOK: Input: default@analyze_srcpart_partial_scan@ds=2008-04-08/hr=11
+PREHOOK: Output: default@analyze_srcpart_partial_scan
+PREHOOK: Output: default@analyze_srcpart_partial_scan@ds=2008-04-08/hr=11
+POSTHOOK: query: analyze table analyze_srcpart_partial_scan PARTITION(ds='2008-04-08',hr=11) compute statistics partialscan
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@analyze_srcpart_partial_scan
+POSTHOOK: Input: default@analyze_srcpart_partial_scan@ds=2008-04-08/hr=11
+POSTHOOK: Output: default@analyze_srcpart_partial_scan
+POSTHOOK: Output: default@analyze_srcpart_partial_scan@ds=2008-04-08/hr=11
+PREHOOK: query: describe formatted analyze_srcpart_partial_scan PARTITION(ds='2008-04-08',hr=11)
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@analyze_srcpart_partial_scan
+POSTHOOK: query: describe formatted analyze_srcpart_partial_scan PARTITION(ds='2008-04-08',hr=11)
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@analyze_srcpart_partial_scan
+# col_name            	data_type           	comment             
+	 	 
+key                 	string              	                    
+value               	string              	                    
+	 	 
+# Partition Information	 	 
+# col_name            	data_type           	comment             
+	 	 
+ds                  	string              	                    
+hr                  	string              	                    
+	 	 
+# Detailed Partition Information	 	 
+Partition Value:    	[2008-04-08, 11]    	 
+Database:           	default             	 
+Table:              	analyze_srcpart_partial_scan	 
+#### A masked pattern was here ####
+Partition Parameters:	 	 
+	numFiles            	1                   
+	totalSize           	5293                
+#### A masked pattern was here ####
+	 	 
+# Storage Information	 	 
+SerDe Library:      	org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe	 
+InputFormat:        	org.apache.hadoop.hive.ql.io.RCFileInputFormat	 
+OutputFormat:       	org.apache.hadoop.hive.ql.io.RCFileOutputFormat	 
+Compressed:         	No                  	 
+Num Buckets:        	-1                  	 
+Bucket Columns:     	[]                  	 
+Sort Columns:       	[]                  	 
+Storage Desc Params:	 	 
+	serialization.format	1                   
+PREHOOK: query: describe formatted analyze_srcpart_partial_scan PARTITION(ds='2008-04-09',hr=11)
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@analyze_srcpart_partial_scan
+POSTHOOK: query: describe formatted analyze_srcpart_partial_scan PARTITION(ds='2008-04-09',hr=11)
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@analyze_srcpart_partial_scan
+# col_name            	data_type           	comment             
+	 	 
+key                 	string              	                    
+value               	string              	                    
+	 	 
+# Partition Information	 	 
+# col_name            	data_type           	comment             
+	 	 
+ds                  	string              	                    
+hr                  	string              	                    
+	 	 
+# Detailed Partition Information	 	 
+Partition Value:    	[2008-04-09, 11]    	 
+Database:           	default             	 
+Table:              	analyze_srcpart_partial_scan	 
+#### A masked pattern was here ####
+Partition Parameters:	 	 
+	numFiles            	1                   
+	totalSize           	5293                
+#### A masked pattern was here ####
+	 	 
+# Storage Information	 	 
+SerDe Library:      	org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe	 
+InputFormat:        	org.apache.hadoop.hive.ql.io.RCFileInputFormat	 
+OutputFormat:       	org.apache.hadoop.hive.ql.io.RCFileOutputFormat	 
+Compressed:         	No                  	 
+Num Buckets:        	-1                  	 
+Bucket Columns:     	[]                  	 
+Sort Columns:       	[]                  	 
+Storage Desc Params:	 	 
+	serialization.format	1                   
+PREHOOK: query: drop table analyze_srcpart_partial_scan
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@analyze_srcpart_partial_scan
+PREHOOK: Output: default@analyze_srcpart_partial_scan
+POSTHOOK: query: drop table analyze_srcpart_partial_scan
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@analyze_srcpart_partial_scan
+POSTHOOK: Output: default@analyze_srcpart_partial_scan

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/subquery_multiinsert.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/subquery_multiinsert.q.out b/ql/src/test/results/clientpositive/spark/subquery_multiinsert.q.out
index a0adcae..5b4aa49 100644
--- a/ql/src/test/results/clientpositive/spark/subquery_multiinsert.q.out
+++ b/ql/src/test/results/clientpositive/spark/subquery_multiinsert.q.out
@@ -257,7 +257,8 @@ STAGE PLANS:
               name: default.src_5
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-0
     Move Operator
@@ -270,7 +271,8 @@ STAGE PLANS:
               name: default.src_4
 
   Stage: Stage-4
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 Warning: Shuffle Join JOIN[31][tables = [sq_2_notin_nullcheck]] in Work 'Reducer 2' is a cross product
 PREHOOK: query: from src b 
@@ -680,7 +682,8 @@ STAGE PLANS:
               name: default.src_4
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-1
     Move Operator
@@ -693,7 +696,8 @@ STAGE PLANS:
               name: default.src_5
 
   Stage: Stage-4
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 Warning: Map Join MAPJOIN[47][bigTable=b] in task 'Stage-2:MAPRED' is a cross product
 PREHOOK: query: from src b 

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/temp_table.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/temp_table.q.out b/ql/src/test/results/clientpositive/spark/temp_table.q.out
index c2ec3b4..342d08c 100644
--- a/ql/src/test/results/clientpositive/spark/temp_table.q.out
+++ b/ql/src/test/results/clientpositive/spark/temp_table.q.out
@@ -52,7 +52,8 @@ STAGE PLANS:
           isTemporary: true
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: CREATE TEMPORARY TABLE foo AS SELECT * FROM src WHERE key % 2 = 0
 PREHOOK: type: CREATETABLE_AS_SELECT
@@ -118,7 +119,8 @@ STAGE PLANS:
           isTemporary: true
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: CREATE TEMPORARY TABLE bar AS SELECT * FROM src WHERE key % 2 = 1
 PREHOOK: type: CREATETABLE_AS_SELECT

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/union10.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/union10.q.out b/ql/src/test/results/clientpositive/spark/union10.q.out
index ea1bebb..c83afda 100644
--- a/ql/src/test/results/clientpositive/spark/union10.q.out
+++ b/ql/src/test/results/clientpositive/spark/union10.q.out
@@ -165,7 +165,8 @@ STAGE PLANS:
               name: default.tmptable
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: insert overwrite table tmptable
   select unionsrc.key, unionsrc.value FROM (select 'tst1' as key, count(1) as value from src s1

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/union12.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/union12.q.out b/ql/src/test/results/clientpositive/spark/union12.q.out
index 0639956..7d4ffb5 100644
--- a/ql/src/test/results/clientpositive/spark/union12.q.out
+++ b/ql/src/test/results/clientpositive/spark/union12.q.out
@@ -165,7 +165,8 @@ STAGE PLANS:
               name: default.tmptable
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: insert overwrite table tmptable
   select unionsrc.key, unionsrc.value FROM (select 'tst1' as key, count(1) as value from src s1

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/union17.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/union17.q.out b/ql/src/test/results/clientpositive/spark/union17.q.out
index a967c07..bc72b6c 100644
--- a/ql/src/test/results/clientpositive/spark/union17.q.out
+++ b/ql/src/test/results/clientpositive/spark/union17.q.out
@@ -197,7 +197,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-1
     Move Operator
@@ -210,7 +211,8 @@ STAGE PLANS:
               name: default.dest2
 
   Stage: Stage-4
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM (select 'tst1' as key, cast(count(1) as string) as value from src s1
                          UNION  ALL  

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/union18.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/union18.q.out b/ql/src/test/results/clientpositive/spark/union18.q.out
index 653c54c..124dbdc 100644
--- a/ql/src/test/results/clientpositive/spark/union18.q.out
+++ b/ql/src/test/results/clientpositive/spark/union18.q.out
@@ -130,7 +130,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-1
     Move Operator
@@ -143,7 +144,8 @@ STAGE PLANS:
               name: default.dest2
 
   Stage: Stage-4
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM (select 'tst1' as key, cast(count(1) as string) as value from src s1
                          UNION  ALL  

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/union19.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/union19.q.out b/ql/src/test/results/clientpositive/spark/union19.q.out
index fe5902f..4d2d56a 100644
--- a/ql/src/test/results/clientpositive/spark/union19.q.out
+++ b/ql/src/test/results/clientpositive/spark/union19.q.out
@@ -159,7 +159,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-1
     Move Operator
@@ -172,7 +173,8 @@ STAGE PLANS:
               name: default.dest2
 
   Stage: Stage-4
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM (select 'tst1' as key, cast(count(1) as string) as value from src s1
                          UNION  ALL  

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/union22.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/union22.q.out b/ql/src/test/results/clientpositive/spark/union22.q.out
index 6acaba8..9a8e553 100644
--- a/ql/src/test/results/clientpositive/spark/union22.q.out
+++ b/ql/src/test/results/clientpositive/spark/union22.q.out
@@ -407,7 +407,8 @@ STAGE PLANS:
               name: default.dst_union22
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
 PREHOOK: query: insert overwrite table dst_union22 partition (ds='2')

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/union25.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/union25.q.out b/ql/src/test/results/clientpositive/spark/union25.q.out
index 559b318..c5a001a 100644
--- a/ql/src/test/results/clientpositive/spark/union25.q.out
+++ b/ql/src/test/results/clientpositive/spark/union25.q.out
@@ -189,5 +189,6 @@ STAGE PLANS:
           name: default.tmp_unionall
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/union28.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/union28.q.out b/ql/src/test/results/clientpositive/spark/union28.q.out
index 7ee06fe..83c105d 100644
--- a/ql/src/test/results/clientpositive/spark/union28.q.out
+++ b/ql/src/test/results/clientpositive/spark/union28.q.out
@@ -135,7 +135,8 @@ STAGE PLANS:
               name: default.union_subq_union
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: insert overwrite table union_subq_union 
 select * from (

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/union29.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/union29.q.out b/ql/src/test/results/clientpositive/spark/union29.q.out
index 05c44d1..3fc229b 100644
--- a/ql/src/test/results/clientpositive/spark/union29.q.out
+++ b/ql/src/test/results/clientpositive/spark/union29.q.out
@@ -117,7 +117,8 @@ STAGE PLANS:
               name: default.union_subq_union
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: insert overwrite table union_subq_union 
 select * from (

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/union30.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/union30.q.out b/ql/src/test/results/clientpositive/spark/union30.q.out
index 9d827eb..87ae311 100644
--- a/ql/src/test/results/clientpositive/spark/union30.q.out
+++ b/ql/src/test/results/clientpositive/spark/union30.q.out
@@ -170,7 +170,8 @@ STAGE PLANS:
               name: default.union_subq_union
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: insert overwrite table union_subq_union 
 select * from (

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/union31.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/union31.q.out b/ql/src/test/results/clientpositive/spark/union31.q.out
index 10f8bdb..0b02386 100644
--- a/ql/src/test/results/clientpositive/spark/union31.q.out
+++ b/ql/src/test/results/clientpositive/spark/union31.q.out
@@ -235,7 +235,8 @@ STAGE PLANS:
               name: default.t3
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-1
     Move Operator
@@ -248,7 +249,8 @@ STAGE PLANS:
               name: default.t4
 
   Stage: Stage-4
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: from
 (select * from t1
@@ -488,7 +490,8 @@ STAGE PLANS:
               name: default.t5
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-1
     Move Operator
@@ -501,7 +504,8 @@ STAGE PLANS:
               name: default.t6
 
   Stage: Stage-4
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: from
 (
@@ -772,7 +776,8 @@ STAGE PLANS:
               name: default.t7
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-1
     Move Operator
@@ -785,7 +790,8 @@ STAGE PLANS:
               name: default.t8
 
   Stage: Stage-4
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: from
 (

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/union33.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/union33.q.out b/ql/src/test/results/clientpositive/spark/union33.q.out
index def5f69..8e3e2a2 100644
--- a/ql/src/test/results/clientpositive/spark/union33.q.out
+++ b/ql/src/test/results/clientpositive/spark/union33.q.out
@@ -124,7 +124,8 @@ STAGE PLANS:
               name: default.test_src
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: INSERT OVERWRITE TABLE test_src 
 SELECT key, value FROM (
@@ -277,7 +278,8 @@ STAGE PLANS:
               name: default.test_src
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: INSERT OVERWRITE TABLE test_src 
 SELECT key, value FROM (

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/union4.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/union4.q.out b/ql/src/test/results/clientpositive/spark/union4.q.out
index cb8c6a2..f78e82d 100644
--- a/ql/src/test/results/clientpositive/spark/union4.q.out
+++ b/ql/src/test/results/clientpositive/spark/union4.q.out
@@ -121,7 +121,8 @@ STAGE PLANS:
               name: default.tmptable
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: insert overwrite table tmptable
 select unionsrc.key, unionsrc.value FROM (select 'tst1' as key, count(1) as value from src s1

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/union6.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/union6.q.out b/ql/src/test/results/clientpositive/spark/union6.q.out
index 6f61839..91c6b2b 100644
--- a/ql/src/test/results/clientpositive/spark/union6.q.out
+++ b/ql/src/test/results/clientpositive/spark/union6.q.out
@@ -94,7 +94,8 @@ STAGE PLANS:
               name: default.tmptable
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: insert overwrite table tmptable
 select unionsrc.key, unionsrc.value FROM (select 'tst1' as key, cast(count(1) as string) as value from src s1

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/union_lateralview.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/union_lateralview.q.out b/ql/src/test/results/clientpositive/spark/union_lateralview.q.out
index fe9afb8..8835a98 100644
--- a/ql/src/test/results/clientpositive/spark/union_lateralview.q.out
+++ b/ql/src/test/results/clientpositive/spark/union_lateralview.q.out
@@ -196,7 +196,8 @@ STAGE PLANS:
               name: default.test_union_lateral_view
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: INSERT OVERWRITE TABLE test_union_lateral_view
 SELECT b.key, d.arr_ele, d.value

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/union_top_level.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/union_top_level.q.out b/ql/src/test/results/clientpositive/spark/union_top_level.q.out
index 6adf6c4..86e92d2 100644
--- a/ql/src/test/results/clientpositive/spark/union_top_level.q.out
+++ b/ql/src/test/results/clientpositive/spark/union_top_level.q.out
@@ -497,7 +497,8 @@ STAGE PLANS:
           name: default.union_top
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: create table union_top as
 select * from (select key, 0 as value from src where key % 3 == 0 limit 3)a
@@ -709,7 +710,8 @@ STAGE PLANS:
               name: default.union_top
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: insert into table union_top
 select * from (select key, 0 as value from src where key % 3 == 0 limit 3)a
@@ -913,7 +915,8 @@ STAGE PLANS:
               name: default.union_top
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: insert overwrite table union_top
 select * from (select key, 0 as value from src where key % 3 == 0 limit 3)a

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/vector_char_4.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/vector_char_4.q.out b/ql/src/test/results/clientpositive/spark/vector_char_4.q.out
index 5f74aec..d0a463ec 100644
--- a/ql/src/test/results/clientpositive/spark/vector_char_4.q.out
+++ b/ql/src/test/results/clientpositive/spark/vector_char_4.q.out
@@ -190,5 +190,6 @@ STAGE PLANS:
               name: default.char_lazy_binary_columnar
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/vector_outer_join1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/vector_outer_join1.q.out b/ql/src/test/results/clientpositive/spark/vector_outer_join1.q.out
index a2726f9..0287c0a 100644
--- a/ql/src/test/results/clientpositive/spark/vector_outer_join1.q.out
+++ b/ql/src/test/results/clientpositive/spark/vector_outer_join1.q.out
@@ -186,10 +186,12 @@ POSTHOOK: Output: default@small_alltypesorc_a
 PREHOOK: query: ANALYZE TABLE small_alltypesorc_a COMPUTE STATISTICS FOR COLUMNS
 PREHOOK: type: QUERY
 PREHOOK: Input: default@small_alltypesorc_a
+PREHOOK: Output: default@small_alltypesorc_a
 #### A masked pattern was here ####
 POSTHOOK: query: ANALYZE TABLE small_alltypesorc_a COMPUTE STATISTICS FOR COLUMNS
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@small_alltypesorc_a
+POSTHOOK: Output: default@small_alltypesorc_a
 #### A masked pattern was here ####
 PREHOOK: query: select * from small_alltypesorc_a
 PREHOOK: type: QUERY

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/vector_outer_join2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/vector_outer_join2.q.out b/ql/src/test/results/clientpositive/spark/vector_outer_join2.q.out
index 026e5f6..ae22c78 100644
--- a/ql/src/test/results/clientpositive/spark/vector_outer_join2.q.out
+++ b/ql/src/test/results/clientpositive/spark/vector_outer_join2.q.out
@@ -191,10 +191,12 @@ POSTHOOK: Output: default@small_alltypesorc_a
 PREHOOK: query: ANALYZE TABLE small_alltypesorc_a COMPUTE STATISTICS FOR COLUMNS
 PREHOOK: type: QUERY
 PREHOOK: Input: default@small_alltypesorc_a
+PREHOOK: Output: default@small_alltypesorc_a
 #### A masked pattern was here ####
 POSTHOOK: query: ANALYZE TABLE small_alltypesorc_a COMPUTE STATISTICS FOR COLUMNS
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@small_alltypesorc_a
+POSTHOOK: Output: default@small_alltypesorc_a
 #### A masked pattern was here ####
 PREHOOK: query: select * from small_alltypesorc_a
 PREHOOK: type: QUERY

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/vector_outer_join3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/vector_outer_join3.q.out b/ql/src/test/results/clientpositive/spark/vector_outer_join3.q.out
index dbbfd34..f8d1ec2 100644
--- a/ql/src/test/results/clientpositive/spark/vector_outer_join3.q.out
+++ b/ql/src/test/results/clientpositive/spark/vector_outer_join3.q.out
@@ -191,10 +191,12 @@ POSTHOOK: Output: default@small_alltypesorc_a
 PREHOOK: query: ANALYZE TABLE small_alltypesorc_a COMPUTE STATISTICS FOR COLUMNS
 PREHOOK: type: QUERY
 PREHOOK: Input: default@small_alltypesorc_a
+PREHOOK: Output: default@small_alltypesorc_a
 #### A masked pattern was here ####
 POSTHOOK: query: ANALYZE TABLE small_alltypesorc_a COMPUTE STATISTICS FOR COLUMNS
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@small_alltypesorc_a
+POSTHOOK: Output: default@small_alltypesorc_a
 #### A masked pattern was here ####
 PREHOOK: query: select * from small_alltypesorc_a
 PREHOOK: type: QUERY

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/vector_outer_join4.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/vector_outer_join4.q.out b/ql/src/test/results/clientpositive/spark/vector_outer_join4.q.out
index ffce9e6..a55250b 100644
--- a/ql/src/test/results/clientpositive/spark/vector_outer_join4.q.out
+++ b/ql/src/test/results/clientpositive/spark/vector_outer_join4.q.out
@@ -201,10 +201,12 @@ POSTHOOK: Output: default@small_alltypesorc_b
 PREHOOK: query: ANALYZE TABLE small_alltypesorc_b COMPUTE STATISTICS FOR COLUMNS
 PREHOOK: type: QUERY
 PREHOOK: Input: default@small_alltypesorc_b
+PREHOOK: Output: default@small_alltypesorc_b
 #### A masked pattern was here ####
 POSTHOOK: query: ANALYZE TABLE small_alltypesorc_b COMPUTE STATISTICS FOR COLUMNS
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@small_alltypesorc_b
+POSTHOOK: Output: default@small_alltypesorc_b
 #### A masked pattern was here ####
 PREHOOK: query: select * from small_alltypesorc_b
 PREHOOK: type: QUERY

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/vector_outer_join5.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/vector_outer_join5.q.out b/ql/src/test/results/clientpositive/spark/vector_outer_join5.q.out
index 4f25253..680ee42 100644
--- a/ql/src/test/results/clientpositive/spark/vector_outer_join5.q.out
+++ b/ql/src/test/results/clientpositive/spark/vector_outer_join5.q.out
@@ -27,10 +27,12 @@ POSTHOOK: Output: default@sorted_mod_4
 PREHOOK: query: ANALYZE TABLE sorted_mod_4 COMPUTE STATISTICS FOR COLUMNS
 PREHOOK: type: QUERY
 PREHOOK: Input: default@sorted_mod_4
+PREHOOK: Output: default@sorted_mod_4
 #### A masked pattern was here ####
 POSTHOOK: query: ANALYZE TABLE sorted_mod_4 COMPUTE STATISTICS FOR COLUMNS
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@sorted_mod_4
+POSTHOOK: Output: default@sorted_mod_4
 #### A masked pattern was here ####
 PREHOOK: query: create table small_table stored
 as orc as select ctinyint, cbigint from alltypesorc limit 100
@@ -57,10 +59,12 @@ POSTHOOK: Output: default@small_table
 PREHOOK: query: ANALYZE TABLE small_table COMPUTE STATISTICS FOR COLUMNS
 PREHOOK: type: QUERY
 PREHOOK: Input: default@small_table
+PREHOOK: Output: default@small_table
 #### A masked pattern was here ####
 POSTHOOK: query: ANALYZE TABLE small_table COMPUTE STATISTICS FOR COLUMNS
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@small_table
+POSTHOOK: Output: default@small_table
 #### A masked pattern was here ####
 PREHOOK: query: explain vectorization detail formatted
 select count(*) from (select s.*, st.*
@@ -267,10 +271,12 @@ POSTHOOK: Output: default@mod_8_mod_4
 PREHOOK: query: ANALYZE TABLE mod_8_mod_4 COMPUTE STATISTICS FOR COLUMNS
 PREHOOK: type: QUERY
 PREHOOK: Input: default@mod_8_mod_4
+PREHOOK: Output: default@mod_8_mod_4
 #### A masked pattern was here ####
 POSTHOOK: query: ANALYZE TABLE mod_8_mod_4 COMPUTE STATISTICS FOR COLUMNS
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@mod_8_mod_4
+POSTHOOK: Output: default@mod_8_mod_4
 #### A masked pattern was here ####
 PREHOOK: query: create table small_table2 stored
 as orc as select pmod(ctinyint, 16) as cmodtinyint, cbigint from alltypesorc limit 100
@@ -297,10 +303,12 @@ POSTHOOK: Output: default@small_table2
 PREHOOK: query: ANALYZE TABLE small_table2 COMPUTE STATISTICS FOR COLUMNS
 PREHOOK: type: QUERY
 PREHOOK: Input: default@small_table2
+PREHOOK: Output: default@small_table2
 #### A masked pattern was here ####
 POSTHOOK: query: ANALYZE TABLE small_table2 COMPUTE STATISTICS FOR COLUMNS
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@small_table2
+POSTHOOK: Output: default@small_table2
 #### A masked pattern was here ####
 PREHOOK: query: explain vectorization detail formatted
 select count(*) from (select s.*, st.*

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/vector_varchar_4.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/vector_varchar_4.q.out b/ql/src/test/results/clientpositive/spark/vector_varchar_4.q.out
index 4036058..84cd13e 100644
--- a/ql/src/test/results/clientpositive/spark/vector_varchar_4.q.out
+++ b/ql/src/test/results/clientpositive/spark/vector_varchar_4.q.out
@@ -190,5 +190,6 @@ STAGE PLANS:
               name: default.varchar_lazy_binary_columnar
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/vectorized_ptf.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/vectorized_ptf.q.out b/ql/src/test/results/clientpositive/spark/vectorized_ptf.q.out
index 31f54f4..d91cb0e 100644
--- a/ql/src/test/results/clientpositive/spark/vectorized_ptf.q.out
+++ b/ql/src/test/results/clientpositive/spark/vectorized_ptf.q.out
@@ -4018,7 +4018,8 @@ STAGE PLANS:
               name: default.part_4
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-1
     Move Operator
@@ -4031,7 +4032,8 @@ STAGE PLANS:
               name: default.part_5
 
   Stage: Stage-4
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: from noop(on part_orc 
 partition by p_mfgr 

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/special_character_in_tabnames_2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/special_character_in_tabnames_2.q.out b/ql/src/test/results/clientpositive/special_character_in_tabnames_2.q.out
index fc5bab0..8f463e8 100644
--- a/ql/src/test/results/clientpositive/special_character_in_tabnames_2.q.out
+++ b/ql/src/test/results/clientpositive/special_character_in_tabnames_2.q.out
@@ -29,10 +29,12 @@ POSTHOOK: Output: default@s/c
 PREHOOK: query: ANALYZE TABLE `s/c` COMPUTE STATISTICS FOR COLUMNS key,value
 PREHOOK: type: QUERY
 PREHOOK: Input: default@s/c
+PREHOOK: Output: default@s/c
 #### A masked pattern was here ####
 POSTHOOK: query: ANALYZE TABLE `s/c` COMPUTE STATISTICS FOR COLUMNS key,value
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@s/c
+POSTHOOK: Output: default@s/c
 #### A masked pattern was here ####
 PREHOOK: query: SELECT key, value FROM `s/c` WHERE key > 80 AND key < 100
 PREHOOK: type: QUERY

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/stats0.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/stats0.q.out b/ql/src/test/results/clientpositive/stats0.q.out
index c7e8da9..e993eeb 100644
--- a/ql/src/test/results/clientpositive/stats0.q.out
+++ b/ql/src/test/results/clientpositive/stats0.q.out
@@ -144,7 +144,8 @@ STAGE PLANS:
               name: default.stats_non_partitioned
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
 PREHOOK: query: insert overwrite table stats_non_partitioned
@@ -731,7 +732,8 @@ STAGE PLANS:
               name: default.stats_partitioned
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: insert overwrite table stats_partitioned partition (ds='1')
 select * from src
@@ -1468,7 +1470,8 @@ STAGE PLANS:
               name: default.stats_non_partitioned
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
   Stage: Stage-3
@@ -2249,7 +2252,8 @@ STAGE PLANS:
               name: default.stats_partitioned
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-3
     Map Reduce

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/stats1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/stats1.q.out b/ql/src/test/results/clientpositive/stats1.q.out
index 075e16b..2675ea5 100644
--- a/ql/src/test/results/clientpositive/stats1.q.out
+++ b/ql/src/test/results/clientpositive/stats1.q.out
@@ -104,7 +104,8 @@ STAGE PLANS:
               name: default.tmptable
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: INSERT OVERWRITE TABLE tmptable
 SELECT unionsrc.key, unionsrc.value 

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/stats10.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/stats10.q.out b/ql/src/test/results/clientpositive/stats10.q.out
index fa6142f..acd482b 100644
--- a/ql/src/test/results/clientpositive/stats10.q.out
+++ b/ql/src/test/results/clientpositive/stats10.q.out
@@ -62,7 +62,8 @@ STAGE PLANS:
               name: default.bucket3_1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: insert overwrite table bucket3_1 partition (ds='1')
 select * from src
@@ -374,7 +375,8 @@ STAGE PLANS:
             Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
 
   Stage: Stage-1
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: analyze table bucket3_1 partition (ds) compute statistics
 PREHOOK: type: QUERY

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/stats12.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/stats12.q.out b/ql/src/test/results/clientpositive/stats12.q.out
index 3b91cc9..d6380a4 100644
--- a/ql/src/test/results/clientpositive/stats12.q.out
+++ b/ql/src/test/results/clientpositive/stats12.q.out
@@ -150,8 +150,9 @@ STAGE PLANS:
         /analyze_srcpart/ds=2008-04-08/hr=12 [analyze_srcpart]
 
   Stage: Stage-1
-    Stats-Aggr Operator
-      Stats Aggregation Key Prefix: default.analyze_srcpart/
+    Stats Work
+      Basic Stats Work:
+          Stats Aggregation Key Prefix: default.analyze_srcpart/
 
 PREHOOK: query: analyze table analyze_srcpart PARTITION(ds='2008-04-08',hr) compute statistics
 PREHOOK: type: QUERY

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/stats13.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/stats13.q.out b/ql/src/test/results/clientpositive/stats13.q.out
index 903e256..a2d9d3e 100644
--- a/ql/src/test/results/clientpositive/stats13.q.out
+++ b/ql/src/test/results/clientpositive/stats13.q.out
@@ -104,8 +104,9 @@ STAGE PLANS:
         /analyze_srcpart/ds=2008-04-08/hr=11 [analyze_srcpart]
 
   Stage: Stage-1
-    Stats-Aggr Operator
-      Stats Aggregation Key Prefix: default.analyze_srcpart/
+    Stats Work
+      Basic Stats Work:
+          Stats Aggregation Key Prefix: default.analyze_srcpart/
 
 PREHOOK: query: analyze table analyze_srcpart PARTITION(ds='2008-04-08',hr=11) compute statistics
 PREHOOK: type: QUERY

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/stats2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/stats2.q.out b/ql/src/test/results/clientpositive/stats2.q.out
index 991b137..65691d3 100644
--- a/ql/src/test/results/clientpositive/stats2.q.out
+++ b/ql/src/test/results/clientpositive/stats2.q.out
@@ -132,7 +132,8 @@ STAGE PLANS:
             Statistics: Num rows: 1 Data size: 23248 Basic stats: PARTIAL Column stats: NONE
 
   Stage: Stage-1
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: analyze table analyze_t1 partition (ds, hr) compute statistics
 PREHOOK: type: QUERY

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/stats3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/stats3.q.out b/ql/src/test/results/clientpositive/stats3.q.out
index 7d4ed9b..212ddcc 100644
--- a/ql/src/test/results/clientpositive/stats3.q.out
+++ b/ql/src/test/results/clientpositive/stats3.q.out
@@ -54,7 +54,8 @@ STAGE PLANS:
               name: default.hive_test_src
 
   Stage: Stage-1
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: load data local inpath '../../data/files/test.dat' overwrite into table hive_test_src
 PREHOOK: type: LOAD

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/stats4.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/stats4.q.out b/ql/src/test/results/clientpositive/stats4.q.out
index c0c3c12..741e47b 100644
--- a/ql/src/test/results/clientpositive/stats4.q.out
+++ b/ql/src/test/results/clientpositive/stats4.q.out
@@ -120,7 +120,8 @@ STAGE PLANS:
               name: default.nzhang_part1
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-4
     Map Reduce
@@ -175,7 +176,8 @@ STAGE PLANS:
               name: default.nzhang_part2
 
   Stage: Stage-9
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-10
     Map Reduce

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/stats5.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/stats5.q.out b/ql/src/test/results/clientpositive/stats5.q.out
index b71c7c1..8d4f9c4 100644
--- a/ql/src/test/results/clientpositive/stats5.q.out
+++ b/ql/src/test/results/clientpositive/stats5.q.out
@@ -27,7 +27,8 @@ STAGE PLANS:
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE
 
   Stage: Stage-1
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: analyze table analyze_src compute statistics
 PREHOOK: type: QUERY

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/stats7.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/stats7.q.out b/ql/src/test/results/clientpositive/stats7.q.out
index 5db2619..0be8848 100644
--- a/ql/src/test/results/clientpositive/stats7.q.out
+++ b/ql/src/test/results/clientpositive/stats7.q.out
@@ -50,7 +50,8 @@ STAGE PLANS:
             Statistics: Num rows: 1 Data size: 23248 Basic stats: PARTIAL Column stats: NONE
 
   Stage: Stage-1
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: analyze table analyze_srcpart PARTITION(ds='2008-04-08',hr) compute statistics
 PREHOOK: type: QUERY

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/stats8.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/stats8.q.out b/ql/src/test/results/clientpositive/stats8.q.out
index 3a617d4..6969ca8 100644
--- a/ql/src/test/results/clientpositive/stats8.q.out
+++ b/ql/src/test/results/clientpositive/stats8.q.out
@@ -50,7 +50,8 @@ STAGE PLANS:
             Statistics: Num rows: 1 Data size: 23248 Basic stats: PARTIAL Column stats: NONE
 
   Stage: Stage-1
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: analyze table analyze_srcpart PARTITION(ds='2008-04-08',hr=11) compute statistics
 PREHOOK: type: QUERY
@@ -158,7 +159,8 @@ STAGE PLANS:
             Statistics: Num rows: 500 Data size: 5312 Basic stats: PARTIAL Column stats: NONE
 
   Stage: Stage-1
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: analyze table analyze_srcpart PARTITION(ds='2008-04-08',hr=12) compute statistics
 PREHOOK: type: QUERY
@@ -227,7 +229,8 @@ STAGE PLANS:
             Statistics: Num rows: 1000 Data size: 10624 Basic stats: PARTIAL Column stats: NONE
 
   Stage: Stage-1
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: analyze table analyze_srcpart PARTITION(ds='2008-04-09',hr=11) compute statistics
 PREHOOK: type: QUERY
@@ -296,7 +299,8 @@ STAGE PLANS:
             Statistics: Num rows: 1500 Data size: 15936 Basic stats: PARTIAL Column stats: NONE
 
   Stage: Stage-1
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: analyze table analyze_srcpart PARTITION(ds='2008-04-09',hr=12) compute statistics
 PREHOOK: type: QUERY
@@ -365,7 +369,8 @@ STAGE PLANS:
             Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
 
   Stage: Stage-1
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: analyze table analyze_srcpart PARTITION(ds, hr) compute statistics
 PREHOOK: type: QUERY

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/stats9.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/stats9.q.out b/ql/src/test/results/clientpositive/stats9.q.out
index 7556c05..d056b29 100644
--- a/ql/src/test/results/clientpositive/stats9.q.out
+++ b/ql/src/test/results/clientpositive/stats9.q.out
@@ -33,7 +33,8 @@ STAGE PLANS:
             Statistics: Num rows: 1 Data size: 11603 Basic stats: COMPLETE Column stats: COMPLETE
 
   Stage: Stage-1
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: analyze table analyze_srcbucket compute statistics
 PREHOOK: type: QUERY

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/stats_empty_dyn_part.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/stats_empty_dyn_part.q.out b/ql/src/test/results/clientpositive/stats_empty_dyn_part.q.out
index b855b38..5456654 100644
--- a/ql/src/test/results/clientpositive/stats_empty_dyn_part.q.out
+++ b/ql/src/test/results/clientpositive/stats_empty_dyn_part.q.out
@@ -65,7 +65,8 @@ STAGE PLANS:
               name: default.tmptable
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-3
     Map Reduce

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/stats_invalidation.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/stats_invalidation.q.out b/ql/src/test/results/clientpositive/stats_invalidation.q.out
index 77d39d4..8200582 100644
--- a/ql/src/test/results/clientpositive/stats_invalidation.q.out
+++ b/ql/src/test/results/clientpositive/stats_invalidation.q.out
@@ -21,10 +21,12 @@ POSTHOOK: Lineage: stats_invalid.value SIMPLE [(src)src.FieldSchema(name:value,
 PREHOOK: query: analyze table stats_invalid compute statistics for columns key,value
 PREHOOK: type: QUERY
 PREHOOK: Input: default@stats_invalid
+PREHOOK: Output: default@stats_invalid
 #### A masked pattern was here ####
 POSTHOOK: query: analyze table stats_invalid compute statistics for columns key,value
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@stats_invalid
+POSTHOOK: Output: default@stats_invalid
 #### A masked pattern was here ####
 PREHOOK: query: desc formatted stats_invalid
 PREHOOK: type: DESCTABLE

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/stats_missing_warning.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/stats_missing_warning.q.out b/ql/src/test/results/clientpositive/stats_missing_warning.q.out
index 0ed70a0..b905785 100644
--- a/ql/src/test/results/clientpositive/stats_missing_warning.q.out
+++ b/ql/src/test/results/clientpositive/stats_missing_warning.q.out
@@ -117,26 +117,32 @@ POSTHOOK: Input: default@missing_stats_t3
 PREHOOK: query: ANALYZE TABLE missing_stats_t1 COMPUTE STATISTICS FOR COLUMNS
 PREHOOK: type: QUERY
 PREHOOK: Input: default@missing_stats_t1
+PREHOOK: Output: default@missing_stats_t1
 #### A masked pattern was here ####
 POSTHOOK: query: ANALYZE TABLE missing_stats_t1 COMPUTE STATISTICS FOR COLUMNS
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@missing_stats_t1
+POSTHOOK: Output: default@missing_stats_t1
 #### A masked pattern was here ####
 PREHOOK: query: ANALYZE TABLE missing_stats_t2 COMPUTE STATISTICS FOR COLUMNS
 PREHOOK: type: QUERY
 PREHOOK: Input: default@missing_stats_t2
+PREHOOK: Output: default@missing_stats_t2
 #### A masked pattern was here ####
 POSTHOOK: query: ANALYZE TABLE missing_stats_t2 COMPUTE STATISTICS FOR COLUMNS
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@missing_stats_t2
+POSTHOOK: Output: default@missing_stats_t2
 #### A masked pattern was here ####
 PREHOOK: query: ANALYZE TABLE missing_stats_t3 COMPUTE STATISTICS FOR COLUMNS
 PREHOOK: type: QUERY
 PREHOOK: Input: default@missing_stats_t3
+PREHOOK: Output: default@missing_stats_t3
 #### A masked pattern was here ####
 POSTHOOK: query: ANALYZE TABLE missing_stats_t3 COMPUTE STATISTICS FOR COLUMNS
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@missing_stats_t3
+POSTHOOK: Output: default@missing_stats_t3
 #### A masked pattern was here ####
 PREHOOK: query: SELECT COUNT(*)
 FROM missing_stats_t1 t1

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/stats_noscan_1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/stats_noscan_1.q.out b/ql/src/test/results/clientpositive/stats_noscan_1.q.out
index 6d3f413..c8fa18c 100644
--- a/ql/src/test/results/clientpositive/stats_noscan_1.q.out
+++ b/ql/src/test/results/clientpositive/stats_noscan_1.q.out
@@ -44,7 +44,8 @@ STAGE DEPENDENCIES:
 
 STAGE PLANS:
   Stage: Stage-1
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: analyze table analyze_srcpart PARTITION(ds='2008-04-08',hr=11) compute statistics noscan
 PREHOOK: type: QUERY
@@ -305,7 +306,8 @@ STAGE DEPENDENCIES:
 
 STAGE PLANS:
   Stage: Stage-1
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: analyze table analyze_srcpart_partial PARTITION(ds='2008-04-08') compute statistics noscan
 PREHOOK: type: QUERY

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/stats_only_null.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/stats_only_null.q.out b/ql/src/test/results/clientpositive/stats_only_null.q.out
index bcbc3f4..5bab960 100644
--- a/ql/src/test/results/clientpositive/stats_only_null.q.out
+++ b/ql/src/test/results/clientpositive/stats_only_null.q.out
@@ -169,30 +169,40 @@ STAGE PLANS:
 PREHOOK: query: analyze table stats_null compute statistics for columns a,b,c,d
 PREHOOK: type: QUERY
 PREHOOK: Input: default@stats_null
+PREHOOK: Output: default@stats_null
 #### A masked pattern was here ####
 POSTHOOK: query: analyze table stats_null compute statistics for columns a,b,c,d
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@stats_null
+POSTHOOK: Output: default@stats_null
 #### A masked pattern was here ####
 PREHOOK: query: analyze table stats_null_part partition(dt='2010') compute statistics for columns a,b,c,d
 PREHOOK: type: QUERY
 PREHOOK: Input: default@stats_null_part
 PREHOOK: Input: default@stats_null_part@dt=2010
+PREHOOK: Output: default@stats_null_part
+PREHOOK: Output: default@stats_null_part@dt=2010
 #### A masked pattern was here ####
 POSTHOOK: query: analyze table stats_null_part partition(dt='2010') compute statistics for columns a,b,c,d
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@stats_null_part
 POSTHOOK: Input: default@stats_null_part@dt=2010
+POSTHOOK: Output: default@stats_null_part
+POSTHOOK: Output: default@stats_null_part@dt=2010
 #### A masked pattern was here ####
 PREHOOK: query: analyze table stats_null_part partition(dt='2011') compute statistics for columns a,b,c,d
 PREHOOK: type: QUERY
 PREHOOK: Input: default@stats_null_part
 PREHOOK: Input: default@stats_null_part@dt=2011
+PREHOOK: Output: default@stats_null_part
+PREHOOK: Output: default@stats_null_part@dt=2011
 #### A masked pattern was here ####
 POSTHOOK: query: analyze table stats_null_part partition(dt='2011') compute statistics for columns a,b,c,d
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@stats_null_part
 POSTHOOK: Input: default@stats_null_part@dt=2011
+POSTHOOK: Output: default@stats_null_part
+POSTHOOK: Output: default@stats_null_part@dt=2011
 #### A masked pattern was here ####
 PREHOOK: query: describe formatted stats_null_part partition (dt='2010')
 PREHOOK: type: DESCTABLE
@@ -360,12 +370,18 @@ PREHOOK: type: QUERY
 PREHOOK: Input: default@stats_null_part
 PREHOOK: Input: default@stats_null_part@dt=1
 PREHOOK: Input: default@stats_null_part@dt=__HIVE_DEFAULT_PARTITION__
+PREHOOK: Output: default@stats_null_part
+PREHOOK: Output: default@stats_null_part@dt=1
+PREHOOK: Output: default@stats_null_part@dt=__HIVE_DEFAULT_PARTITION__
 #### A masked pattern was here ####
 POSTHOOK: query: analyze table stats_null_part compute statistics for columns
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@stats_null_part
 POSTHOOK: Input: default@stats_null_part@dt=1
 POSTHOOK: Input: default@stats_null_part@dt=__HIVE_DEFAULT_PARTITION__
+POSTHOOK: Output: default@stats_null_part
+POSTHOOK: Output: default@stats_null_part@dt=1
+POSTHOOK: Output: default@stats_null_part@dt=__HIVE_DEFAULT_PARTITION__
 #### A masked pattern was here ####
 PREHOOK: query: describe formatted stats_null_part partition(dt = 1) a
 PREHOOK: type: DESCTABLE

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/stats_partial_size.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/stats_partial_size.q.out b/ql/src/test/results/clientpositive/stats_partial_size.q.out
index 4a4061d..09129f2 100644
--- a/ql/src/test/results/clientpositive/stats_partial_size.q.out
+++ b/ql/src/test/results/clientpositive/stats_partial_size.q.out
@@ -28,10 +28,12 @@ POSTHOOK: Output: default@sample
 PREHOOK: query: analyze table sample compute statistics for columns
 PREHOOK: type: QUERY
 PREHOOK: Input: default@sample
+PREHOOK: Output: default@sample
 #### A masked pattern was here ####
 POSTHOOK: query: analyze table sample compute statistics for columns
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@sample
+POSTHOOK: Output: default@sample
 #### A masked pattern was here ####
 PREHOOK: query: explain select sample_partitioned.x from sample_partitioned, sample where sample.y = sample_partitioned.y
 PREHOOK: type: QUERY

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/stats_partscan_1_23.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/stats_partscan_1_23.q.out b/ql/src/test/results/clientpositive/stats_partscan_1_23.q.out
new file mode 100644
index 0000000..34f7023
--- /dev/null
+++ b/ql/src/test/results/clientpositive/stats_partscan_1_23.q.out
@@ -0,0 +1,191 @@
+PREHOOK: query: CREATE table analyze_srcpart_partial_scan (key STRING, value STRING)
+partitioned by (ds string, hr string)
+stored as rcfile
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@analyze_srcpart_partial_scan
+POSTHOOK: query: CREATE table analyze_srcpart_partial_scan (key STRING, value STRING)
+partitioned by (ds string, hr string)
+stored as rcfile
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@analyze_srcpart_partial_scan
+PREHOOK: query: insert overwrite table analyze_srcpart_partial_scan partition (ds, hr) select * from srcpart where ds is not null
+PREHOOK: type: QUERY
+PREHOOK: Input: default@srcpart
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
+PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
+PREHOOK: Output: default@analyze_srcpart_partial_scan
+POSTHOOK: query: insert overwrite table analyze_srcpart_partial_scan partition (ds, hr) select * from srcpart where ds is not null
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@srcpart
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
+POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
+POSTHOOK: Output: default@analyze_srcpart_partial_scan@ds=2008-04-08/hr=11
+POSTHOOK: Output: default@analyze_srcpart_partial_scan@ds=2008-04-08/hr=12
+POSTHOOK: Output: default@analyze_srcpart_partial_scan@ds=2008-04-09/hr=11
+POSTHOOK: Output: default@analyze_srcpart_partial_scan@ds=2008-04-09/hr=12
+POSTHOOK: Lineage: analyze_srcpart_partial_scan PARTITION(ds=2008-04-08,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: analyze_srcpart_partial_scan PARTITION(ds=2008-04-08,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: analyze_srcpart_partial_scan PARTITION(ds=2008-04-08,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: analyze_srcpart_partial_scan PARTITION(ds=2008-04-08,hr=12).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: analyze_srcpart_partial_scan PARTITION(ds=2008-04-09,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: analyze_srcpart_partial_scan PARTITION(ds=2008-04-09,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: analyze_srcpart_partial_scan PARTITION(ds=2008-04-09,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: analyze_srcpart_partial_scan PARTITION(ds=2008-04-09,hr=12).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: describe formatted analyze_srcpart_partial_scan PARTITION(ds='2008-04-08',hr=11)
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@analyze_srcpart_partial_scan
+POSTHOOK: query: describe formatted analyze_srcpart_partial_scan PARTITION(ds='2008-04-08',hr=11)
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@analyze_srcpart_partial_scan
+# col_name            	data_type           	comment             
+	 	 
+key                 	string              	                    
+value               	string              	                    
+	 	 
+# Partition Information	 	 
+# col_name            	data_type           	comment             
+	 	 
+ds                  	string              	                    
+hr                  	string              	                    
+	 	 
+# Detailed Partition Information	 	 
+Partition Value:    	[2008-04-08, 11]    	 
+Database:           	default             	 
+Table:              	analyze_srcpart_partial_scan	 
+#### A masked pattern was here ####
+Partition Parameters:	 	 
+	numFiles            	1                   
+	totalSize           	5293                
+#### A masked pattern was here ####
+	 	 
+# Storage Information	 	 
+SerDe Library:      	org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe	 
+InputFormat:        	org.apache.hadoop.hive.ql.io.RCFileInputFormat	 
+OutputFormat:       	org.apache.hadoop.hive.ql.io.RCFileOutputFormat	 
+Compressed:         	No                  	 
+Num Buckets:        	-1                  	 
+Bucket Columns:     	[]                  	 
+Sort Columns:       	[]                  	 
+Storage Desc Params:	 	 
+	serialization.format	1                   
+PREHOOK: query: explain
+analyze table analyze_srcpart_partial_scan PARTITION(ds='2008-04-08',hr=11) compute statistics partialscan
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+analyze table analyze_srcpart_partial_scan PARTITION(ds='2008-04-08',hr=11) compute statistics partialscan
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-2 is a root stage
+  Stage-1 depends on stages: Stage-2
+
+STAGE PLANS:
+  Stage: Stage-2
+    Partial Scan Statistics
+
+  Stage: Stage-1
+    Stats Work
+      Basic Stats Work:
+
+PREHOOK: query: analyze table analyze_srcpart_partial_scan PARTITION(ds='2008-04-08',hr=11) compute statistics partialscan
+PREHOOK: type: QUERY
+PREHOOK: Input: default@analyze_srcpart_partial_scan
+PREHOOK: Input: default@analyze_srcpart_partial_scan@ds=2008-04-08/hr=11
+PREHOOK: Output: default@analyze_srcpart_partial_scan
+PREHOOK: Output: default@analyze_srcpart_partial_scan@ds=2008-04-08/hr=11
+POSTHOOK: query: analyze table analyze_srcpart_partial_scan PARTITION(ds='2008-04-08',hr=11) compute statistics partialscan
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@analyze_srcpart_partial_scan
+POSTHOOK: Input: default@analyze_srcpart_partial_scan@ds=2008-04-08/hr=11
+POSTHOOK: Output: default@analyze_srcpart_partial_scan
+POSTHOOK: Output: default@analyze_srcpart_partial_scan@ds=2008-04-08/hr=11
+PREHOOK: query: describe formatted analyze_srcpart_partial_scan PARTITION(ds='2008-04-08',hr=11)
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@analyze_srcpart_partial_scan
+POSTHOOK: query: describe formatted analyze_srcpart_partial_scan PARTITION(ds='2008-04-08',hr=11)
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@analyze_srcpart_partial_scan
+# col_name            	data_type           	comment             
+	 	 
+key                 	string              	                    
+value               	string              	                    
+	 	 
+# Partition Information	 	 
+# col_name            	data_type           	comment             
+	 	 
+ds                  	string              	                    
+hr                  	string              	                    
+	 	 
+# Detailed Partition Information	 	 
+Partition Value:    	[2008-04-08, 11]    	 
+Database:           	default             	 
+Table:              	analyze_srcpart_partial_scan	 
+#### A masked pattern was here ####
+Partition Parameters:	 	 
+	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\"}
+	numFiles            	1                   
+	numRows             	500                 
+	rawDataSize         	4812                
+	totalSize           	5293                
+#### A masked pattern was here ####
+	 	 
+# Storage Information	 	 
+SerDe Library:      	org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe	 
+InputFormat:        	org.apache.hadoop.hive.ql.io.RCFileInputFormat	 
+OutputFormat:       	org.apache.hadoop.hive.ql.io.RCFileOutputFormat	 
+Compressed:         	No                  	 
+Num Buckets:        	-1                  	 
+Bucket Columns:     	[]                  	 
+Sort Columns:       	[]                  	 
+Storage Desc Params:	 	 
+	serialization.format	1                   
+PREHOOK: query: describe formatted analyze_srcpart_partial_scan PARTITION(ds='2008-04-09',hr=11)
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@analyze_srcpart_partial_scan
+POSTHOOK: query: describe formatted analyze_srcpart_partial_scan PARTITION(ds='2008-04-09',hr=11)
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@analyze_srcpart_partial_scan
+# col_name            	data_type           	comment             
+	 	 
+key                 	string              	                    
+value               	string              	                    
+	 	 
+# Partition Information	 	 
+# col_name            	data_type           	comment             
+	 	 
+ds                  	string              	                    
+hr                  	string              	                    
+	 	 
+# Detailed Partition Information	 	 
+Partition Value:    	[2008-04-09, 11]    	 
+Database:           	default             	 
+Table:              	analyze_srcpart_partial_scan	 
+#### A masked pattern was here ####
+Partition Parameters:	 	 
+	numFiles            	1                   
+	totalSize           	5293                
+#### A masked pattern was here ####
+	 	 
+# Storage Information	 	 
+SerDe Library:      	org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe	 
+InputFormat:        	org.apache.hadoop.hive.ql.io.RCFileInputFormat	 
+OutputFormat:       	org.apache.hadoop.hive.ql.io.RCFileOutputFormat	 
+Compressed:         	No                  	 
+Num Buckets:        	-1                  	 
+Bucket Columns:     	[]                  	 
+Sort Columns:       	[]                  	 
+Storage Desc Params:	 	 
+	serialization.format	1                   
+PREHOOK: query: drop table analyze_srcpart_partial_scan
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@analyze_srcpart_partial_scan
+PREHOOK: Output: default@analyze_srcpart_partial_scan
+POSTHOOK: query: drop table analyze_srcpart_partial_scan
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@analyze_srcpart_partial_scan
+POSTHOOK: Output: default@analyze_srcpart_partial_scan

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/stats_ppr_all.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/stats_ppr_all.q.out b/ql/src/test/results/clientpositive/stats_ppr_all.q.out
index 3b9bb30..f19e3f5 100644
--- a/ql/src/test/results/clientpositive/stats_ppr_all.q.out
+++ b/ql/src/test/results/clientpositive/stats_ppr_all.q.out
@@ -46,6 +46,10 @@ PREHOOK: Input: default@ss
 PREHOOK: Input: default@ss@country=US/year=2015/month=1/day=1
 PREHOOK: Input: default@ss@country=US/year=2015/month=1/day=2
 PREHOOK: Input: default@ss@country=US/year=2015/month=2/day=1
+PREHOOK: Output: default@ss
+PREHOOK: Output: default@ss@country=US/year=2015/month=1/day=1
+PREHOOK: Output: default@ss@country=US/year=2015/month=1/day=2
+PREHOOK: Output: default@ss@country=US/year=2015/month=2/day=1
 #### A masked pattern was here ####
 POSTHOOK: query: ANALYZE TABLE ss PARTITION(country,year,month,day) compute statistics for columns
 POSTHOOK: type: QUERY
@@ -53,6 +57,10 @@ POSTHOOK: Input: default@ss
 POSTHOOK: Input: default@ss@country=US/year=2015/month=1/day=1
 POSTHOOK: Input: default@ss@country=US/year=2015/month=1/day=2
 POSTHOOK: Input: default@ss@country=US/year=2015/month=2/day=1
+POSTHOOK: Output: default@ss
+POSTHOOK: Output: default@ss@country=US/year=2015/month=1/day=1
+POSTHOOK: Output: default@ss@country=US/year=2015/month=1/day=2
+POSTHOOK: Output: default@ss@country=US/year=2015/month=2/day=1
 #### A masked pattern was here ####
 PREHOOK: query: explain select sum(order_amount) from ss where (country="US" and year=2015 and month=2 and day=1)
 PREHOOK: type: QUERY

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/subquery_multiinsert.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/subquery_multiinsert.q.out b/ql/src/test/results/clientpositive/subquery_multiinsert.q.out
index 28c82b8..7abb6b2 100644
--- a/ql/src/test/results/clientpositive/subquery_multiinsert.q.out
+++ b/ql/src/test/results/clientpositive/subquery_multiinsert.q.out
@@ -233,7 +233,8 @@ STAGE PLANS:
               name: default.src_5
 
   Stage: Stage-5
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-6
     Map Reduce
@@ -293,7 +294,8 @@ STAGE PLANS:
               name: default.src_4
 
   Stage: Stage-7
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 Warning: Shuffle Join JOIN[31][tables = [b, sq_2_notin_nullcheck]] in Stage 'Stage-2:MAPRED' is a cross product
 PREHOOK: query: from src b 
@@ -699,7 +701,8 @@ STAGE PLANS:
               name: default.src_5
 
   Stage: Stage-5
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-16
     Map Reduce Local Work
@@ -763,7 +766,8 @@ STAGE PLANS:
               name: default.src_4
 
   Stage: Stage-7
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-2
     Map Reduce


[16/22] hive git commit: HIVE-16827 : Merge stats task and column stats task into a single task (Zoltan Haindrich via Ashutosh Chauhan)

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/autoColumnStats_10.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/autoColumnStats_10.q.out b/ql/src/test/results/clientpositive/autoColumnStats_10.q.out
new file mode 100644
index 0000000..6cb51fd
--- /dev/null
+++ b/ql/src/test/results/clientpositive/autoColumnStats_10.q.out
@@ -0,0 +1,516 @@
+PREHOOK: query: drop table p
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: drop table p
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: CREATE TABLE p(insert_num int, c1 tinyint, c2 smallint)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@p
+POSTHOOK: query: CREATE TABLE p(insert_num int, c1 tinyint, c2 smallint)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@p
+PREHOOK: query: desc formatted p
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@p
+POSTHOOK: query: desc formatted p
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@p
+# col_name            	data_type           	comment             
+insert_num          	int                 	                    
+c1                  	tinyint             	                    
+c2                  	smallint            	                    
+	 	 
+# Detailed Table Information	 	 
+Database:           	default             	 
+#### A masked pattern was here ####
+Retention:          	0                   	 
+#### A masked pattern was here ####
+Table Type:         	MANAGED_TABLE       	 
+Table Parameters:	 	 
+	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"c1\":\"true\",\"c2\":\"true\",\"insert_num\":\"true\"}}
+	numFiles            	0                   
+	numRows             	0                   
+	rawDataSize         	0                   
+	totalSize           	0                   
+#### A masked pattern was here ####
+	 	 
+# Storage Information	 	 
+SerDe Library:      	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe	 
+InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
+OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
+Compressed:         	No                  	 
+Num Buckets:        	-1                  	 
+Bucket Columns:     	[]                  	 
+Sort Columns:       	[]                  	 
+Storage Desc Params:	 	 
+	serialization.format	1                   
+PREHOOK: query: insert into p values (1,22,333)
+PREHOOK: type: QUERY
+PREHOOK: Output: default@p
+POSTHOOK: query: insert into p values (1,22,333)
+POSTHOOK: type: QUERY
+POSTHOOK: Output: default@p
+POSTHOOK: Lineage: p.c1 EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col2, type:string, comment:), ]
+POSTHOOK: Lineage: p.c2 EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col3, type:string, comment:), ]
+POSTHOOK: Lineage: p.insert_num EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
+PREHOOK: query: desc formatted p
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@p
+POSTHOOK: query: desc formatted p
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@p
+# col_name            	data_type           	comment             
+insert_num          	int                 	                    
+c1                  	tinyint             	                    
+c2                  	smallint            	                    
+	 	 
+# Detailed Table Information	 	 
+Database:           	default             	 
+#### A masked pattern was here ####
+Retention:          	0                   	 
+#### A masked pattern was here ####
+Table Type:         	MANAGED_TABLE       	 
+Table Parameters:	 	 
+	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"c1\":\"true\",\"c2\":\"true\",\"insert_num\":\"true\"}}
+	numFiles            	1                   
+	numRows             	1                   
+	rawDataSize         	8                   
+	totalSize           	9                   
+#### A masked pattern was here ####
+	 	 
+# Storage Information	 	 
+SerDe Library:      	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe	 
+InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
+OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
+Compressed:         	No                  	 
+Num Buckets:        	-1                  	 
+Bucket Columns:     	[]                  	 
+Sort Columns:       	[]                  	 
+Storage Desc Params:	 	 
+	serialization.format	1                   
+PREHOOK: query: alter table p replace columns (insert_num int, c1 STRING, c2 STRING)
+PREHOOK: type: ALTERTABLE_REPLACECOLS
+PREHOOK: Input: default@p
+PREHOOK: Output: default@p
+POSTHOOK: query: alter table p replace columns (insert_num int, c1 STRING, c2 STRING)
+POSTHOOK: type: ALTERTABLE_REPLACECOLS
+POSTHOOK: Input: default@p
+POSTHOOK: Output: default@p
+PREHOOK: query: desc formatted p
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@p
+POSTHOOK: query: desc formatted p
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@p
+# col_name            	data_type           	comment             
+insert_num          	int                 	                    
+c1                  	string              	                    
+c2                  	string              	                    
+	 	 
+# Detailed Table Information	 	 
+Database:           	default             	 
+#### A masked pattern was here ####
+Retention:          	0                   	 
+#### A masked pattern was here ####
+Table Type:         	MANAGED_TABLE       	 
+Table Parameters:	 	 
+	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"insert_num\":\"true\"}}
+#### A masked pattern was here ####
+	numFiles            	1                   
+	numRows             	1                   
+	rawDataSize         	8                   
+	totalSize           	9                   
+#### A masked pattern was here ####
+	 	 
+# Storage Information	 	 
+SerDe Library:      	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe	 
+InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
+OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
+Compressed:         	No                  	 
+Num Buckets:        	-1                  	 
+Bucket Columns:     	[]                  	 
+Sort Columns:       	[]                  	 
+Storage Desc Params:	 	 
+	serialization.format	1                   
+PREHOOK: query: desc formatted p insert_num
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@p
+POSTHOOK: query: desc formatted p insert_num
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@p
+col_name            	insert_num          	 	 	 	 	 	 	 	 	 	 
+data_type           	int                 	 	 	 	 	 	 	 	 	 	 
+min                 	1                   	 	 	 	 	 	 	 	 	 	 
+max                 	1                   	 	 	 	 	 	 	 	 	 	 
+num_nulls           	0                   	 	 	 	 	 	 	 	 	 	 
+distinct_count      	1                   	 	 	 	 	 	 	 	 	 	 
+avg_col_len         	                    	 	 	 	 	 	 	 	 	 	 
+max_col_len         	                    	 	 	 	 	 	 	 	 	 	 
+num_trues           	                    	 	 	 	 	 	 	 	 	 	 
+num_falses          	                    	 	 	 	 	 	 	 	 	 	 
+bitVector           	HL                  	 	 	 	 	 	 	 	 	 	 
+comment             	from deserializer   	 	 	 	 	 	 	 	 	 	 
+COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"insert_num\":\"true\"}}	 	 	 	 	 	 	 	 	 	 
+PREHOOK: query: desc formatted p c1
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@p
+POSTHOOK: query: desc formatted p c1
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@p
+col_name            	c1                  	 	 	 	 	 	 	 	 	 	 
+data_type           	string              	 	 	 	 	 	 	 	 	 	 
+min                 	                    	 	 	 	 	 	 	 	 	 	 
+max                 	                    	 	 	 	 	 	 	 	 	 	 
+num_nulls           	                    	 	 	 	 	 	 	 	 	 	 
+distinct_count      	                    	 	 	 	 	 	 	 	 	 	 
+avg_col_len         	                    	 	 	 	 	 	 	 	 	 	 
+max_col_len         	                    	 	 	 	 	 	 	 	 	 	 
+num_trues           	                    	 	 	 	 	 	 	 	 	 	 
+num_falses          	                    	 	 	 	 	 	 	 	 	 	 
+bitVector           	                    	 	 	 	 	 	 	 	 	 	 
+comment             	from deserializer   	 	 	 	 	 	 	 	 	 	 
+COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"insert_num\":\"true\"}}	 	 	 	 	 	 	 	 	 	 
+PREHOOK: query: insert into p values (2,11,111)
+PREHOOK: type: QUERY
+PREHOOK: Output: default@p
+POSTHOOK: query: insert into p values (2,11,111)
+POSTHOOK: type: QUERY
+POSTHOOK: Output: default@p
+POSTHOOK: Lineage: p.c1 SIMPLE [(values__tmp__table__2)values__tmp__table__2.FieldSchema(name:tmp_values_col2, type:string, comment:), ]
+POSTHOOK: Lineage: p.c2 SIMPLE [(values__tmp__table__2)values__tmp__table__2.FieldSchema(name:tmp_values_col3, type:string, comment:), ]
+POSTHOOK: Lineage: p.insert_num EXPRESSION [(values__tmp__table__2)values__tmp__table__2.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
+PREHOOK: query: desc formatted p
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@p
+POSTHOOK: query: desc formatted p
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@p
+# col_name            	data_type           	comment             
+insert_num          	int                 	                    
+c1                  	string              	                    
+c2                  	string              	                    
+	 	 
+# Detailed Table Information	 	 
+Database:           	default             	 
+#### A masked pattern was here ####
+Retention:          	0                   	 
+#### A masked pattern was here ####
+Table Type:         	MANAGED_TABLE       	 
+Table Parameters:	 	 
+	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"insert_num\":\"true\"}}
+#### A masked pattern was here ####
+	numFiles            	2                   
+	numRows             	2                   
+	rawDataSize         	16                  
+	totalSize           	18                  
+#### A masked pattern was here ####
+	 	 
+# Storage Information	 	 
+SerDe Library:      	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe	 
+InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
+OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
+Compressed:         	No                  	 
+Num Buckets:        	-1                  	 
+Bucket Columns:     	[]                  	 
+Sort Columns:       	[]                  	 
+Storage Desc Params:	 	 
+	serialization.format	1                   
+PREHOOK: query: desc formatted p insert_num
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@p
+POSTHOOK: query: desc formatted p insert_num
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@p
+col_name            	insert_num          	 	 	 	 	 	 	 	 	 	 
+data_type           	int                 	 	 	 	 	 	 	 	 	 	 
+min                 	1                   	 	 	 	 	 	 	 	 	 	 
+max                 	2                   	 	 	 	 	 	 	 	 	 	 
+num_nulls           	0                   	 	 	 	 	 	 	 	 	 	 
+distinct_count      	2                   	 	 	 	 	 	 	 	 	 	 
+avg_col_len         	                    	 	 	 	 	 	 	 	 	 	 
+max_col_len         	                    	 	 	 	 	 	 	 	 	 	 
+num_trues           	                    	 	 	 	 	 	 	 	 	 	 
+num_falses          	                    	 	 	 	 	 	 	 	 	 	 
+bitVector           	HL                  	 	 	 	 	 	 	 	 	 	 
+comment             	from deserializer   	 	 	 	 	 	 	 	 	 	 
+COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"insert_num\":\"true\"}}	 	 	 	 	 	 	 	 	 	 
+PREHOOK: query: desc formatted p c1
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@p
+POSTHOOK: query: desc formatted p c1
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@p
+col_name            	c1                  	 	 	 	 	 	 	 	 	 	 
+data_type           	string              	 	 	 	 	 	 	 	 	 	 
+min                 	                    	 	 	 	 	 	 	 	 	 	 
+max                 	                    	 	 	 	 	 	 	 	 	 	 
+num_nulls           	                    	 	 	 	 	 	 	 	 	 	 
+distinct_count      	                    	 	 	 	 	 	 	 	 	 	 
+avg_col_len         	                    	 	 	 	 	 	 	 	 	 	 
+max_col_len         	                    	 	 	 	 	 	 	 	 	 	 
+num_trues           	                    	 	 	 	 	 	 	 	 	 	 
+num_falses          	                    	 	 	 	 	 	 	 	 	 	 
+bitVector           	                    	 	 	 	 	 	 	 	 	 	 
+comment             	from deserializer   	 	 	 	 	 	 	 	 	 	 
+COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"insert_num\":\"true\"}}	 	 	 	 	 	 	 	 	 	 
+PREHOOK: query: drop table p
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@p
+PREHOOK: Output: default@p
+POSTHOOK: query: drop table p
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@p
+POSTHOOK: Output: default@p
+PREHOOK: query: CREATE TABLE p(insert_num int, c1 tinyint, c2 smallint)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@p
+POSTHOOK: query: CREATE TABLE p(insert_num int, c1 tinyint, c2 smallint)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@p
+PREHOOK: query: desc formatted p
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@p
+POSTHOOK: query: desc formatted p
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@p
+# col_name            	data_type           	comment             
+insert_num          	int                 	                    
+c1                  	tinyint             	                    
+c2                  	smallint            	                    
+	 	 
+# Detailed Table Information	 	 
+Database:           	default             	 
+#### A masked pattern was here ####
+Retention:          	0                   	 
+#### A masked pattern was here ####
+Table Type:         	MANAGED_TABLE       	 
+Table Parameters:	 	 
+	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"c1\":\"true\",\"c2\":\"true\",\"insert_num\":\"true\"}}
+	numFiles            	0                   
+	numRows             	0                   
+	rawDataSize         	0                   
+	totalSize           	0                   
+#### A masked pattern was here ####
+	 	 
+# Storage Information	 	 
+SerDe Library:      	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe	 
+InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
+OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
+Compressed:         	No                  	 
+Num Buckets:        	-1                  	 
+Bucket Columns:     	[]                  	 
+Sort Columns:       	[]                  	 
+Storage Desc Params:	 	 
+	serialization.format	1                   
+PREHOOK: query: insert into p values (1,22,333)
+PREHOOK: type: QUERY
+PREHOOK: Output: default@p
+POSTHOOK: query: insert into p values (1,22,333)
+POSTHOOK: type: QUERY
+POSTHOOK: Output: default@p
+POSTHOOK: Lineage: p.c1 EXPRESSION [(values__tmp__table__3)values__tmp__table__3.FieldSchema(name:tmp_values_col2, type:string, comment:), ]
+POSTHOOK: Lineage: p.c2 EXPRESSION [(values__tmp__table__3)values__tmp__table__3.FieldSchema(name:tmp_values_col3, type:string, comment:), ]
+POSTHOOK: Lineage: p.insert_num EXPRESSION [(values__tmp__table__3)values__tmp__table__3.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
+PREHOOK: query: desc formatted p
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@p
+POSTHOOK: query: desc formatted p
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@p
+# col_name            	data_type           	comment             
+insert_num          	int                 	                    
+c1                  	tinyint             	                    
+c2                  	smallint            	                    
+	 	 
+# Detailed Table Information	 	 
+Database:           	default             	 
+#### A masked pattern was here ####
+Retention:          	0                   	 
+#### A masked pattern was here ####
+Table Type:         	MANAGED_TABLE       	 
+Table Parameters:	 	 
+	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\"}
+	numFiles            	1                   
+	numRows             	1                   
+	rawDataSize         	8                   
+	totalSize           	9                   
+#### A masked pattern was here ####
+	 	 
+# Storage Information	 	 
+SerDe Library:      	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe	 
+InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
+OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
+Compressed:         	No                  	 
+Num Buckets:        	-1                  	 
+Bucket Columns:     	[]                  	 
+Sort Columns:       	[]                  	 
+Storage Desc Params:	 	 
+	serialization.format	1                   
+PREHOOK: query: alter table p replace columns (insert_num int, c1 STRING, c2 STRING)
+PREHOOK: type: ALTERTABLE_REPLACECOLS
+PREHOOK: Input: default@p
+PREHOOK: Output: default@p
+POSTHOOK: query: alter table p replace columns (insert_num int, c1 STRING, c2 STRING)
+POSTHOOK: type: ALTERTABLE_REPLACECOLS
+POSTHOOK: Input: default@p
+POSTHOOK: Output: default@p
+PREHOOK: query: desc formatted p
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@p
+POSTHOOK: query: desc formatted p
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@p
+# col_name            	data_type           	comment             
+insert_num          	int                 	                    
+c1                  	string              	                    
+c2                  	string              	                    
+	 	 
+# Detailed Table Information	 	 
+Database:           	default             	 
+#### A masked pattern was here ####
+Retention:          	0                   	 
+#### A masked pattern was here ####
+Table Type:         	MANAGED_TABLE       	 
+Table Parameters:	 	 
+	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\"}
+#### A masked pattern was here ####
+	numFiles            	1                   
+	numRows             	1                   
+	rawDataSize         	8                   
+	totalSize           	9                   
+#### A masked pattern was here ####
+	 	 
+# Storage Information	 	 
+SerDe Library:      	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe	 
+InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
+OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
+Compressed:         	No                  	 
+Num Buckets:        	-1                  	 
+Bucket Columns:     	[]                  	 
+Sort Columns:       	[]                  	 
+Storage Desc Params:	 	 
+	serialization.format	1                   
+PREHOOK: query: desc formatted p insert_num
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@p
+POSTHOOK: query: desc formatted p insert_num
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@p
+col_name            	insert_num          	 	 	 	 	 	 	 	 	 	 
+data_type           	int                 	 	 	 	 	 	 	 	 	 	 
+min                 	                    	 	 	 	 	 	 	 	 	 	 
+max                 	                    	 	 	 	 	 	 	 	 	 	 
+num_nulls           	                    	 	 	 	 	 	 	 	 	 	 
+distinct_count      	                    	 	 	 	 	 	 	 	 	 	 
+avg_col_len         	                    	 	 	 	 	 	 	 	 	 	 
+max_col_len         	                    	 	 	 	 	 	 	 	 	 	 
+num_trues           	                    	 	 	 	 	 	 	 	 	 	 
+num_falses          	                    	 	 	 	 	 	 	 	 	 	 
+bitVector           	                    	 	 	 	 	 	 	 	 	 	 
+comment             	from deserializer   	 	 	 	 	 	 	 	 	 	 
+COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\"}	 	 	 	 	 	 	 	 	 	 
+PREHOOK: query: desc formatted p c1
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@p
+POSTHOOK: query: desc formatted p c1
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@p
+col_name            	c1                  	 	 	 	 	 	 	 	 	 	 
+data_type           	string              	 	 	 	 	 	 	 	 	 	 
+min                 	                    	 	 	 	 	 	 	 	 	 	 
+max                 	                    	 	 	 	 	 	 	 	 	 	 
+num_nulls           	                    	 	 	 	 	 	 	 	 	 	 
+distinct_count      	                    	 	 	 	 	 	 	 	 	 	 
+avg_col_len         	                    	 	 	 	 	 	 	 	 	 	 
+max_col_len         	                    	 	 	 	 	 	 	 	 	 	 
+num_trues           	                    	 	 	 	 	 	 	 	 	 	 
+num_falses          	                    	 	 	 	 	 	 	 	 	 	 
+bitVector           	                    	 	 	 	 	 	 	 	 	 	 
+comment             	from deserializer   	 	 	 	 	 	 	 	 	 	 
+COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\"}	 	 	 	 	 	 	 	 	 	 
+PREHOOK: query: insert into p values (2,11,111)
+PREHOOK: type: QUERY
+PREHOOK: Output: default@p
+POSTHOOK: query: insert into p values (2,11,111)
+POSTHOOK: type: QUERY
+POSTHOOK: Output: default@p
+POSTHOOK: Lineage: p.c1 SIMPLE [(values__tmp__table__4)values__tmp__table__4.FieldSchema(name:tmp_values_col2, type:string, comment:), ]
+POSTHOOK: Lineage: p.c2 SIMPLE [(values__tmp__table__4)values__tmp__table__4.FieldSchema(name:tmp_values_col3, type:string, comment:), ]
+POSTHOOK: Lineage: p.insert_num EXPRESSION [(values__tmp__table__4)values__tmp__table__4.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
+PREHOOK: query: desc formatted p
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@p
+POSTHOOK: query: desc formatted p
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@p
+# col_name            	data_type           	comment             
+insert_num          	int                 	                    
+c1                  	string              	                    
+c2                  	string              	                    
+	 	 
+# Detailed Table Information	 	 
+Database:           	default             	 
+#### A masked pattern was here ####
+Retention:          	0                   	 
+#### A masked pattern was here ####
+Table Type:         	MANAGED_TABLE       	 
+Table Parameters:	 	 
+	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\"}
+#### A masked pattern was here ####
+	numFiles            	2                   
+	numRows             	2                   
+	rawDataSize         	16                  
+	totalSize           	18                  
+#### A masked pattern was here ####
+	 	 
+# Storage Information	 	 
+SerDe Library:      	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe	 
+InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
+OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
+Compressed:         	No                  	 
+Num Buckets:        	-1                  	 
+Bucket Columns:     	[]                  	 
+Sort Columns:       	[]                  	 
+Storage Desc Params:	 	 
+	serialization.format	1                   
+PREHOOK: query: desc formatted p insert_num
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@p
+POSTHOOK: query: desc formatted p insert_num
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@p
+col_name            	insert_num          	 	 	 	 	 	 	 	 	 	 
+data_type           	int                 	 	 	 	 	 	 	 	 	 	 
+min                 	                    	 	 	 	 	 	 	 	 	 	 
+max                 	                    	 	 	 	 	 	 	 	 	 	 
+num_nulls           	                    	 	 	 	 	 	 	 	 	 	 
+distinct_count      	                    	 	 	 	 	 	 	 	 	 	 
+avg_col_len         	                    	 	 	 	 	 	 	 	 	 	 
+max_col_len         	                    	 	 	 	 	 	 	 	 	 	 
+num_trues           	                    	 	 	 	 	 	 	 	 	 	 
+num_falses          	                    	 	 	 	 	 	 	 	 	 	 
+bitVector           	                    	 	 	 	 	 	 	 	 	 	 
+comment             	from deserializer   	 	 	 	 	 	 	 	 	 	 
+COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\"}	 	 	 	 	 	 	 	 	 	 
+PREHOOK: query: desc formatted p c1
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@p
+POSTHOOK: query: desc formatted p c1
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@p
+col_name            	c1                  	 	 	 	 	 	 	 	 	 	 
+data_type           	string              	 	 	 	 	 	 	 	 	 	 
+min                 	                    	 	 	 	 	 	 	 	 	 	 
+max                 	                    	 	 	 	 	 	 	 	 	 	 
+num_nulls           	                    	 	 	 	 	 	 	 	 	 	 
+distinct_count      	                    	 	 	 	 	 	 	 	 	 	 
+avg_col_len         	                    	 	 	 	 	 	 	 	 	 	 
+max_col_len         	                    	 	 	 	 	 	 	 	 	 	 
+num_trues           	                    	 	 	 	 	 	 	 	 	 	 
+num_falses          	                    	 	 	 	 	 	 	 	 	 	 
+bitVector           	                    	 	 	 	 	 	 	 	 	 	 
+comment             	from deserializer   	 	 	 	 	 	 	 	 	 	 
+COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\"}	 	 	 	 	 	 	 	 	 	 


[04/22] hive git commit: HIVE-16827 : Merge stats task and column stats task into a single task (Zoltan Haindrich via Ashutosh Chauhan)

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/load_dyn_part5.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/load_dyn_part5.q.out b/ql/src/test/results/clientpositive/spark/load_dyn_part5.q.out
index 16da589..4fc0d8c 100644
--- a/ql/src/test/results/clientpositive/spark/load_dyn_part5.q.out
+++ b/ql/src/test/results/clientpositive/spark/load_dyn_part5.q.out
@@ -67,7 +67,8 @@ STAGE PLANS:
               name: default.nzhang_part5
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: insert overwrite table nzhang_part5 partition (value) select key, value from src
 PREHOOK: type: QUERY

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/load_dyn_part8.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/load_dyn_part8.q.out b/ql/src/test/results/clientpositive/spark/load_dyn_part8.q.out
index 8322177..fea2d51 100644
--- a/ql/src/test/results/clientpositive/spark/load_dyn_part8.q.out
+++ b/ql/src/test/results/clientpositive/spark/load_dyn_part8.q.out
@@ -366,7 +366,8 @@ STAGE PLANS:
               name: default.nzhang_part8
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
   Stage: Stage-1
@@ -398,7 +399,8 @@ STAGE PLANS:
               name: default.nzhang_part8
 
   Stage: Stage-4
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
 PREHOOK: query: from srcpart

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/load_dyn_part9.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/load_dyn_part9.q.out b/ql/src/test/results/clientpositive/spark/load_dyn_part9.q.out
index eb242af..4bb08a0 100644
--- a/ql/src/test/results/clientpositive/spark/load_dyn_part9.q.out
+++ b/ql/src/test/results/clientpositive/spark/load_dyn_part9.q.out
@@ -83,7 +83,8 @@ STAGE PLANS:
               name: default.nzhang_part9
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: from srcpart
 insert overwrite table nzhang_part9 partition (ds, hr) select key, value, ds, hr where ds <= '2008-04-08'

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/mapreduce1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/mapreduce1.q.out b/ql/src/test/results/clientpositive/spark/mapreduce1.q.out
index d75b482..6883343 100644
--- a/ql/src/test/results/clientpositive/spark/mapreduce1.q.out
+++ b/ql/src/test/results/clientpositive/spark/mapreduce1.q.out
@@ -82,7 +82,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM src
 INSERT OVERWRITE TABLE dest1

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/mapreduce2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/mapreduce2.q.out b/ql/src/test/results/clientpositive/spark/mapreduce2.q.out
index adfb503..462a186 100644
--- a/ql/src/test/results/clientpositive/spark/mapreduce2.q.out
+++ b/ql/src/test/results/clientpositive/spark/mapreduce2.q.out
@@ -79,7 +79,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM src
 INSERT OVERWRITE TABLE dest1

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/merge1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/merge1.q.out b/ql/src/test/results/clientpositive/spark/merge1.q.out
index 8e671e9..eee05db 100644
--- a/ql/src/test/results/clientpositive/spark/merge1.q.out
+++ b/ql/src/test/results/clientpositive/spark/merge1.q.out
@@ -93,7 +93,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-3
     Spark
@@ -563,7 +564,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-3
     Spark
@@ -673,7 +675,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-3
     Spark

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/merge2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/merge2.q.out b/ql/src/test/results/clientpositive/spark/merge2.q.out
index 24116cb..c210fe4 100644
--- a/ql/src/test/results/clientpositive/spark/merge2.q.out
+++ b/ql/src/test/results/clientpositive/spark/merge2.q.out
@@ -93,7 +93,8 @@ STAGE PLANS:
               name: default.test1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-3
     Spark
@@ -563,7 +564,8 @@ STAGE PLANS:
               name: default.test1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-3
     Spark
@@ -673,7 +675,8 @@ STAGE PLANS:
               name: default.test1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-3
     Spark

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/metadata_only_queries.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/metadata_only_queries.q.out b/ql/src/test/results/clientpositive/spark/metadata_only_queries.q.out
index 2891c7d..7583650 100644
--- a/ql/src/test/results/clientpositive/spark/metadata_only_queries.q.out
+++ b/ql/src/test/results/clientpositive/spark/metadata_only_queries.q.out
@@ -419,40 +419,54 @@ STAGE PLANS:
 PREHOOK: query: analyze table stats_tbl compute statistics for columns t,si,i,b,f,d,bo,s,bin
 PREHOOK: type: QUERY
 PREHOOK: Input: default@stats_tbl
+PREHOOK: Output: default@stats_tbl
 #### A masked pattern was here ####
 POSTHOOK: query: analyze table stats_tbl compute statistics for columns t,si,i,b,f,d,bo,s,bin
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@stats_tbl
+POSTHOOK: Output: default@stats_tbl
 #### A masked pattern was here ####
 PREHOOK: query: analyze table stats_tbl_part partition(dt='2010') compute statistics for columns t,si,i,b,f,d,bo,s,bin
 PREHOOK: type: QUERY
 PREHOOK: Input: default@stats_tbl_part
 PREHOOK: Input: default@stats_tbl_part@dt=2010
+PREHOOK: Output: default@stats_tbl_part
+PREHOOK: Output: default@stats_tbl_part@dt=2010
 #### A masked pattern was here ####
 POSTHOOK: query: analyze table stats_tbl_part partition(dt='2010') compute statistics for columns t,si,i,b,f,d,bo,s,bin
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@stats_tbl_part
 POSTHOOK: Input: default@stats_tbl_part@dt=2010
+POSTHOOK: Output: default@stats_tbl_part
+POSTHOOK: Output: default@stats_tbl_part@dt=2010
 #### A masked pattern was here ####
 PREHOOK: query: analyze table stats_tbl_part partition(dt='2011') compute statistics for columns t,si,i,b,f,d,bo,s,bin
 PREHOOK: type: QUERY
 PREHOOK: Input: default@stats_tbl_part
 PREHOOK: Input: default@stats_tbl_part@dt=2011
+PREHOOK: Output: default@stats_tbl_part
+PREHOOK: Output: default@stats_tbl_part@dt=2011
 #### A masked pattern was here ####
 POSTHOOK: query: analyze table stats_tbl_part partition(dt='2011') compute statistics for columns t,si,i,b,f,d,bo,s,bin
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@stats_tbl_part
 POSTHOOK: Input: default@stats_tbl_part@dt=2011
+POSTHOOK: Output: default@stats_tbl_part
+POSTHOOK: Output: default@stats_tbl_part@dt=2011
 #### A masked pattern was here ####
 PREHOOK: query: analyze table stats_tbl_part partition(dt='2012') compute statistics for columns t,si,i,b,f,d,bo,s,bin
 PREHOOK: type: QUERY
 PREHOOK: Input: default@stats_tbl_part
 PREHOOK: Input: default@stats_tbl_part@dt=2012
+PREHOOK: Output: default@stats_tbl_part
+PREHOOK: Output: default@stats_tbl_part@dt=2012
 #### A masked pattern was here ####
 POSTHOOK: query: analyze table stats_tbl_part partition(dt='2012') compute statistics for columns t,si,i,b,f,d,bo,s,bin
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@stats_tbl_part
 POSTHOOK: Input: default@stats_tbl_part@dt=2012
+POSTHOOK: Output: default@stats_tbl_part
+POSTHOOK: Output: default@stats_tbl_part@dt=2012
 #### A masked pattern was here ####
 PREHOOK: query: explain 
 select count(*), sum(1), sum(0.2), count(1), count(s), count(bo), count(bin), count(si) from stats_tbl

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/metadata_only_queries_with_filters.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/metadata_only_queries_with_filters.q.out b/ql/src/test/results/clientpositive/spark/metadata_only_queries_with_filters.q.out
index 6376aa7..79d9d27 100644
--- a/ql/src/test/results/clientpositive/spark/metadata_only_queries_with_filters.q.out
+++ b/ql/src/test/results/clientpositive/spark/metadata_only_queries_with_filters.q.out
@@ -126,21 +126,29 @@ PREHOOK: query: analyze table stats_tbl_part partition(dt=2010) compute statisti
 PREHOOK: type: QUERY
 PREHOOK: Input: default@stats_tbl_part
 PREHOOK: Input: default@stats_tbl_part@dt=2010
+PREHOOK: Output: default@stats_tbl_part
+PREHOOK: Output: default@stats_tbl_part@dt=2010
 #### A masked pattern was here ####
 POSTHOOK: query: analyze table stats_tbl_part partition(dt=2010) compute statistics for columns t,si,i,b,f,d,bo,s,bin
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@stats_tbl_part
 POSTHOOK: Input: default@stats_tbl_part@dt=2010
+POSTHOOK: Output: default@stats_tbl_part
+POSTHOOK: Output: default@stats_tbl_part@dt=2010
 #### A masked pattern was here ####
 PREHOOK: query: analyze table stats_tbl_part partition(dt=2014) compute statistics for columns t,si,i,b,f,d,bo,s,bin
 PREHOOK: type: QUERY
 PREHOOK: Input: default@stats_tbl_part
 PREHOOK: Input: default@stats_tbl_part@dt=2014
+PREHOOK: Output: default@stats_tbl_part
+PREHOOK: Output: default@stats_tbl_part@dt=2014
 #### A masked pattern was here ####
 POSTHOOK: query: analyze table stats_tbl_part partition(dt=2014) compute statistics for columns t,si,i,b,f,d,bo,s,bin
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@stats_tbl_part
 POSTHOOK: Input: default@stats_tbl_part@dt=2014
+POSTHOOK: Output: default@stats_tbl_part
+POSTHOOK: Output: default@stats_tbl_part@dt=2014
 #### A masked pattern was here ####
 PREHOOK: query: explain 
 select count(*), count(1), sum(1), count(s), count(bo), count(bin), count(si), max(i), min(b), max(f), min(d) from stats_tbl_part where dt = 2010

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/multi_insert.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/multi_insert.q.out b/ql/src/test/results/clientpositive/spark/multi_insert.q.out
index 4ed1e50..25730b8 100644
--- a/ql/src/test/results/clientpositive/spark/multi_insert.q.out
+++ b/ql/src/test/results/clientpositive/spark/multi_insert.q.out
@@ -83,7 +83,8 @@ STAGE PLANS:
               name: default.src_multi1
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-1
     Move Operator
@@ -96,7 +97,8 @@ STAGE PLANS:
               name: default.src_multi2
 
   Stage: Stage-4
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: from src
 insert overwrite table src_multi1 select * where key < 10
@@ -220,7 +222,8 @@ STAGE PLANS:
               name: default.src_multi1
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-1
     Move Operator
@@ -233,7 +236,8 @@ STAGE PLANS:
               name: default.src_multi2
 
   Stage: Stage-4
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: from src
 insert overwrite table src_multi1 select * where key < 10
@@ -357,7 +361,8 @@ STAGE PLANS:
               name: default.src_multi1
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-1
     Move Operator
@@ -370,7 +375,8 @@ STAGE PLANS:
               name: default.src_multi2
 
   Stage: Stage-4
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: from src
 insert overwrite table src_multi1 select * where key < 10
@@ -494,7 +500,8 @@ STAGE PLANS:
               name: default.src_multi1
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-1
     Move Operator
@@ -507,7 +514,8 @@ STAGE PLANS:
               name: default.src_multi2
 
   Stage: Stage-4
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: from src
 insert overwrite table src_multi1 select * where key < 10
@@ -647,7 +655,8 @@ STAGE PLANS:
               name: default.src_multi1
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-1
     Move Operator
@@ -660,7 +669,8 @@ STAGE PLANS:
               name: default.src_multi2
 
   Stage: Stage-4
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: from src
 insert overwrite table src_multi1 select * where key < 10 group by key, value
@@ -793,7 +803,8 @@ STAGE PLANS:
               name: default.src_multi1
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-1
     Move Operator
@@ -806,7 +817,8 @@ STAGE PLANS:
               name: default.src_multi2
 
   Stage: Stage-4
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: from src
 insert overwrite table src_multi1 select * where key < 10 group by key, value
@@ -939,7 +951,8 @@ STAGE PLANS:
               name: default.src_multi1
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-1
     Move Operator
@@ -952,7 +965,8 @@ STAGE PLANS:
               name: default.src_multi2
 
   Stage: Stage-4
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: from src
 insert overwrite table src_multi1 select * where key < 10 group by key, value
@@ -1085,7 +1099,8 @@ STAGE PLANS:
               name: default.src_multi1
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-1
     Move Operator
@@ -1098,7 +1113,8 @@ STAGE PLANS:
               name: default.src_multi2
 
   Stage: Stage-4
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: from src
 insert overwrite table src_multi1 select * where key < 10 group by key, value
@@ -1242,7 +1258,8 @@ STAGE PLANS:
               name: default.src_multi1
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-1
     Move Operator
@@ -1255,7 +1272,8 @@ STAGE PLANS:
               name: default.src_multi2
 
   Stage: Stage-4
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: from (select * from src  union all select * from src) s
 insert overwrite table src_multi1 select * where key < 10
@@ -1425,7 +1443,8 @@ STAGE PLANS:
               name: default.src_multi1
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-1
     Move Operator
@@ -1438,7 +1457,8 @@ STAGE PLANS:
               name: default.src_multi2
 
   Stage: Stage-4
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: from (select * from src  union all select * from src) s
 insert overwrite table src_multi1 select * where key < 10
@@ -1608,7 +1628,8 @@ STAGE PLANS:
               name: default.src_multi1
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-1
     Move Operator
@@ -1621,7 +1642,8 @@ STAGE PLANS:
               name: default.src_multi2
 
   Stage: Stage-4
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: from (select * from src  union all select * from src) s
 insert overwrite table src_multi1 select * where key < 10
@@ -1791,7 +1813,8 @@ STAGE PLANS:
               name: default.src_multi1
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-1
     Move Operator
@@ -1804,7 +1827,8 @@ STAGE PLANS:
               name: default.src_multi2
 
   Stage: Stage-4
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: from (select * from src  union all select * from src) s
 insert overwrite table src_multi1 select * where key < 10
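
[Note: every multi-insert plan in this file changes the same way -- the post-move
stage that used to print as "Stats-Aggr Operator" is now rendered as "Stats Work"
with a nested "Basic Stats Work:" section, one stage per target table, reflecting
the unified stats task. A quick way to reproduce the new rendering (a sketch,
assuming the src/src_multi1 tables from these tests; the stage number varies with
the plan shape):

    EXPLAIN
    INSERT OVERWRITE TABLE src_multi1
    SELECT * FROM src WHERE key < 10;
    -- expected tail of the plan after this change:
    --   Stage: Stage-2
    --     Stats Work
    --       Basic Stats Work:
]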

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/multi_insert_gby.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/multi_insert_gby.q.out b/ql/src/test/results/clientpositive/spark/multi_insert_gby.q.out
index c86731c..3c478da 100644
--- a/ql/src/test/results/clientpositive/spark/multi_insert_gby.q.out
+++ b/ql/src/test/results/clientpositive/spark/multi_insert_gby.q.out
@@ -111,7 +111,8 @@ STAGE PLANS:
               name: default.e1
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-1
     Move Operator
@@ -124,7 +125,8 @@ STAGE PLANS:
               name: default.e2
 
   Stage: Stage-4
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM src
 INSERT OVERWRITE TABLE e1
@@ -296,7 +298,8 @@ STAGE PLANS:
               name: default.e2
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-0
     Move Operator
@@ -309,7 +312,8 @@ STAGE PLANS:
               name: default.e1
 
   Stage: Stage-4
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM src
 INSERT OVERWRITE TABLE e1

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/multi_insert_gby2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/multi_insert_gby2.q.out b/ql/src/test/results/clientpositive/spark/multi_insert_gby2.q.out
index 3d67b1d..abe331e 100644
--- a/ql/src/test/results/clientpositive/spark/multi_insert_gby2.q.out
+++ b/ql/src/test/results/clientpositive/spark/multi_insert_gby2.q.out
@@ -101,7 +101,8 @@ STAGE PLANS:
               name: default.e1
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-1
     Move Operator
@@ -114,7 +115,8 @@ STAGE PLANS:
               name: default.e2
 
   Stage: Stage-4
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM (select key, cast(key as double) as value from src order by key) a
 INSERT OVERWRITE TABLE e1

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/multi_insert_gby3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/multi_insert_gby3.q.out b/ql/src/test/results/clientpositive/spark/multi_insert_gby3.q.out
index 813704f..96d404c 100644
--- a/ql/src/test/results/clientpositive/spark/multi_insert_gby3.q.out
+++ b/ql/src/test/results/clientpositive/spark/multi_insert_gby3.q.out
@@ -117,7 +117,8 @@ STAGE PLANS:
               name: default.e1
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-1
     Move Operator
@@ -130,7 +131,8 @@ STAGE PLANS:
               name: default.e2
 
   Stage: Stage-4
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: explain
 FROM (select key, cast(key as double) as keyD, value from src order by key) a
@@ -227,7 +229,8 @@ STAGE PLANS:
               name: default.e1
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-1
     Move Operator
@@ -240,7 +243,8 @@ STAGE PLANS:
               name: default.e2
 
   Stage: Stage-4
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM (select key, cast(key as double) as keyD, value from src order by key) a
 INSERT OVERWRITE TABLE e1
@@ -1677,7 +1681,8 @@ STAGE PLANS:
               name: default.e1
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-1
     Move Operator
@@ -1690,7 +1695,8 @@ STAGE PLANS:
               name: default.e3
 
   Stage: Stage-4
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: explain
 FROM (select key, cast(key as double) as keyD, value from src order by key) a
@@ -1834,7 +1840,8 @@ STAGE PLANS:
               name: default.e1
 
   Stage: Stage-4
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-1
     Move Operator
@@ -1847,7 +1854,8 @@ STAGE PLANS:
               name: default.e2
 
   Stage: Stage-5
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-2
     Move Operator
@@ -1860,5 +1868,6 @@ STAGE PLANS:
               name: default.e3
 
   Stage: Stage-6
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/multi_insert_lateral_view.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/multi_insert_lateral_view.q.out b/ql/src/test/results/clientpositive/spark/multi_insert_lateral_view.q.out
index 7b804da..903bf1b 100644
--- a/ql/src/test/results/clientpositive/spark/multi_insert_lateral_view.q.out
+++ b/ql/src/test/results/clientpositive/spark/multi_insert_lateral_view.q.out
@@ -159,7 +159,8 @@ STAGE PLANS:
               name: default.src_lv1
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-1
     Move Operator
@@ -172,7 +173,8 @@ STAGE PLANS:
               name: default.src_lv2
 
   Stage: Stage-4
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: from src_10
 insert overwrite table src_lv1 select key, C lateral view explode(array(key+1, key+2)) A as C
@@ -421,7 +423,8 @@ STAGE PLANS:
               name: default.src_lv1
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-1
     Move Operator
@@ -434,7 +437,8 @@ STAGE PLANS:
               name: default.src_lv2
 
   Stage: Stage-4
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: from src_10
 insert overwrite table src_lv1 select key, sum(C) lateral view explode(array(key+1, key+2)) A as C group by key
@@ -659,7 +663,8 @@ STAGE PLANS:
               name: default.src_lv1
 
   Stage: Stage-4
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-1
     Move Operator
@@ -672,7 +677,8 @@ STAGE PLANS:
               name: default.src_lv2
 
   Stage: Stage-5
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-2
     Move Operator
@@ -685,7 +691,8 @@ STAGE PLANS:
               name: default.src_lv3
 
   Stage: Stage-6
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: from src_10
 insert overwrite table src_lv1 select key, sum(C) lateral view explode(array(key+1, key+2)) A as C group by key
@@ -969,7 +976,8 @@ STAGE PLANS:
               name: default.src_lv1
 
   Stage: Stage-4
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-1
     Move Operator
@@ -982,7 +990,8 @@ STAGE PLANS:
               name: default.src_lv2
 
   Stage: Stage-5
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-2
     Move Operator
@@ -995,7 +1004,8 @@ STAGE PLANS:
               name: default.src_lv3
 
   Stage: Stage-6
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: from src_10
 insert overwrite table src_lv1 select C, sum(distinct key) lateral view explode(array(key+1, key+2)) A as C group by C
@@ -1340,7 +1350,8 @@ STAGE PLANS:
               name: default.src_lv1
 
   Stage: Stage-5
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-1
     Move Operator
@@ -1353,7 +1364,8 @@ STAGE PLANS:
               name: default.src_lv2
 
   Stage: Stage-6
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-2
     Move Operator
@@ -1366,7 +1378,8 @@ STAGE PLANS:
               name: default.src_lv3
 
   Stage: Stage-7
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-3
     Move Operator
@@ -1379,7 +1392,8 @@ STAGE PLANS:
               name: default.src_lv4
 
   Stage: Stage-8
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: from src_10
 insert overwrite table src_lv1 select key, sum(distinct C) lateral view explode(array(key+1, key+2)) A as C group by key

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/multi_insert_mixed.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/multi_insert_mixed.q.out b/ql/src/test/results/clientpositive/spark/multi_insert_mixed.q.out
index 2b28d53..56d7d80 100644
--- a/ql/src/test/results/clientpositive/spark/multi_insert_mixed.q.out
+++ b/ql/src/test/results/clientpositive/spark/multi_insert_mixed.q.out
@@ -176,7 +176,8 @@ STAGE PLANS:
               name: default.src_multi1
 
   Stage: Stage-4
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-1
     Move Operator
@@ -189,7 +190,8 @@ STAGE PLANS:
               name: default.src_multi2
 
   Stage: Stage-5
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-2
     Move Operator
@@ -202,7 +204,8 @@ STAGE PLANS:
               name: default.src_multi3
 
   Stage: Stage-6
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: from src
 insert overwrite table src_multi1 select key, count(1) group by key order by key

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/multi_insert_move_tasks_share_dependencies.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/multi_insert_move_tasks_share_dependencies.q.out b/ql/src/test/results/clientpositive/spark/multi_insert_move_tasks_share_dependencies.q.out
index 85d57b1..db80783 100644
--- a/ql/src/test/results/clientpositive/spark/multi_insert_move_tasks_share_dependencies.q.out
+++ b/ql/src/test/results/clientpositive/spark/multi_insert_move_tasks_share_dependencies.q.out
@@ -87,7 +87,8 @@ STAGE PLANS:
               name: default.src_multi1
 
   Stage: Stage-4
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-1
     Move Operator
@@ -100,7 +101,8 @@ STAGE PLANS:
               name: default.src_multi2
 
   Stage: Stage-5
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: from src
 insert overwrite table src_multi1 select * where key < 10
@@ -228,7 +230,8 @@ STAGE PLANS:
               name: default.src_multi1
 
   Stage: Stage-4
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-1
     Move Operator
@@ -241,7 +244,8 @@ STAGE PLANS:
               name: default.src_multi2
 
   Stage: Stage-5
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: from src
 insert overwrite table src_multi1 select * where key < 10
@@ -369,7 +373,8 @@ STAGE PLANS:
               name: default.src_multi1
 
   Stage: Stage-4
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-1
     Move Operator
@@ -382,7 +387,8 @@ STAGE PLANS:
               name: default.src_multi2
 
   Stage: Stage-5
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: from src
 insert overwrite table src_multi1 select * where key < 10
@@ -510,7 +516,8 @@ STAGE PLANS:
               name: default.src_multi1
 
   Stage: Stage-4
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-1
     Move Operator
@@ -523,7 +530,8 @@ STAGE PLANS:
               name: default.src_multi2
 
   Stage: Stage-5
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: from src
 insert overwrite table src_multi1 select * where key < 10
@@ -667,7 +675,8 @@ STAGE PLANS:
               name: default.src_multi1
 
   Stage: Stage-4
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-1
     Move Operator
@@ -680,7 +689,8 @@ STAGE PLANS:
               name: default.src_multi2
 
   Stage: Stage-5
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: from src
 insert overwrite table src_multi1 select * where key < 10 group by key, value
@@ -817,7 +827,8 @@ STAGE PLANS:
               name: default.src_multi1
 
   Stage: Stage-4
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-1
     Move Operator
@@ -830,7 +841,8 @@ STAGE PLANS:
               name: default.src_multi2
 
   Stage: Stage-5
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: from src
 insert overwrite table src_multi1 select * where key < 10 group by key, value
@@ -967,7 +979,8 @@ STAGE PLANS:
               name: default.src_multi1
 
   Stage: Stage-4
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-1
     Move Operator
@@ -980,7 +993,8 @@ STAGE PLANS:
               name: default.src_multi2
 
   Stage: Stage-5
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: from src
 insert overwrite table src_multi1 select * where key < 10 group by key, value
@@ -1117,7 +1131,8 @@ STAGE PLANS:
               name: default.src_multi1
 
   Stage: Stage-4
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-1
     Move Operator
@@ -1130,7 +1145,8 @@ STAGE PLANS:
               name: default.src_multi2
 
   Stage: Stage-5
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: from src
 insert overwrite table src_multi1 select * where key < 10 group by key, value
@@ -1278,7 +1294,8 @@ STAGE PLANS:
               name: default.src_multi1
 
   Stage: Stage-4
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-1
     Move Operator
@@ -1291,7 +1308,8 @@ STAGE PLANS:
               name: default.src_multi2
 
   Stage: Stage-5
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: from (select * from src  union all select * from src) s
 insert overwrite table src_multi1 select * where key < 10
@@ -1465,7 +1483,8 @@ STAGE PLANS:
               name: default.src_multi1
 
   Stage: Stage-4
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-1
     Move Operator
@@ -1478,7 +1497,8 @@ STAGE PLANS:
               name: default.src_multi2
 
   Stage: Stage-5
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: from (select * from src  union all select * from src) s
 insert overwrite table src_multi1 select * where key < 10
@@ -1652,7 +1672,8 @@ STAGE PLANS:
               name: default.src_multi1
 
   Stage: Stage-4
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-1
     Move Operator
@@ -1665,7 +1686,8 @@ STAGE PLANS:
               name: default.src_multi2
 
   Stage: Stage-5
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: from (select * from src  union all select * from src) s
 insert overwrite table src_multi1 select * where key < 10
@@ -1839,7 +1861,8 @@ STAGE PLANS:
               name: default.src_multi1
 
   Stage: Stage-4
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-1
     Move Operator
@@ -1852,7 +1875,8 @@ STAGE PLANS:
               name: default.src_multi2
 
   Stage: Stage-5
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: from (select * from src  union all select * from src) s
 insert overwrite table src_multi1 select * where key < 10
@@ -2901,7 +2925,8 @@ STAGE PLANS:
               name: default.src_multi1
 
   Stage: Stage-6
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-1
     Move Operator
@@ -2914,7 +2939,8 @@ STAGE PLANS:
               name: default.src_multi2
 
   Stage: Stage-7
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-2
     Move Operator
@@ -3121,7 +3147,8 @@ STAGE PLANS:
               name: default.src_multi1
 
   Stage: Stage-6
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-1
     Move Operator
@@ -3134,7 +3161,8 @@ STAGE PLANS:
               name: default.src_multi2
 
   Stage: Stage-7
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-2
     Move Operator
@@ -3341,7 +3369,8 @@ STAGE PLANS:
               name: default.src_multi1
 
   Stage: Stage-6
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-1
     Move Operator
@@ -3354,7 +3383,8 @@ STAGE PLANS:
               name: default.src_multi2
 
   Stage: Stage-7
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-2
     Move Operator
@@ -3561,7 +3591,8 @@ STAGE PLANS:
               name: default.src_multi1
 
   Stage: Stage-6
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-1
     Move Operator
@@ -3574,7 +3605,8 @@ STAGE PLANS:
               name: default.src_multi2
 
   Stage: Stage-7
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-2
     Move Operator

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/multigroupby_singlemr.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/multigroupby_singlemr.q.out b/ql/src/test/results/clientpositive/spark/multigroupby_singlemr.q.out
index a146a8e..c34d951 100644
--- a/ql/src/test/results/clientpositive/spark/multigroupby_singlemr.q.out
+++ b/ql/src/test/results/clientpositive/spark/multigroupby_singlemr.q.out
@@ -157,7 +157,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-1
     Move Operator
@@ -170,7 +171,8 @@ STAGE PLANS:
               name: default.dest2
 
   Stage: Stage-4
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: EXPLAIN
 FROM TBL
@@ -291,7 +293,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-1
     Move Operator
@@ -304,7 +307,8 @@ STAGE PLANS:
               name: default.dest2
 
   Stage: Stage-4
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: EXPLAIN
 FROM TBL
@@ -425,7 +429,8 @@ STAGE PLANS:
               name: default.dest3
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-1
     Move Operator
@@ -438,7 +443,8 @@ STAGE PLANS:
               name: default.dest2
 
   Stage: Stage-4
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: EXPLAIN
 FROM TBL
@@ -531,7 +537,8 @@ STAGE PLANS:
               name: default.dest3
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-1
     Move Operator
@@ -544,7 +551,8 @@ STAGE PLANS:
               name: default.dest4
 
   Stage: Stage-4
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: EXPLAIN
 FROM TBL
@@ -711,7 +719,8 @@ STAGE PLANS:
               name: default.dest3
 
   Stage: Stage-4
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-1
     Move Operator
@@ -724,7 +733,8 @@ STAGE PLANS:
               name: default.dest2
 
   Stage: Stage-5
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-2
     Move Operator
@@ -737,5 +747,6 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-6
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/orc_merge1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/orc_merge1.q.out b/ql/src/test/results/clientpositive/spark/orc_merge1.q.out
index 1407616..e5dd9d1 100644
--- a/ql/src/test/results/clientpositive/spark/orc_merge1.q.out
+++ b/ql/src/test/results/clientpositive/spark/orc_merge1.q.out
@@ -92,7 +92,8 @@ STAGE PLANS:
               name: default.orcfile_merge1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: INSERT OVERWRITE TABLE orcfile_merge1 PARTITION (ds='1', part)
     SELECT key, value, PMOD(HASH(key), 2) as part
@@ -179,7 +180,8 @@ STAGE PLANS:
               name: default.orcfile_merge1b
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-3
     Spark
@@ -302,7 +304,8 @@ STAGE PLANS:
               name: default.orcfile_merge1c
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-3
     Spark

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/orc_merge2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/orc_merge2.q.out b/ql/src/test/results/clientpositive/spark/orc_merge2.q.out
index b7f1a65..e05ba8c 100644
--- a/ql/src/test/results/clientpositive/spark/orc_merge2.q.out
+++ b/ql/src/test/results/clientpositive/spark/orc_merge2.q.out
@@ -81,7 +81,8 @@ STAGE PLANS:
               name: default.orcfile_merge2a
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-3
     Spark

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/orc_merge3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/orc_merge3.q.out b/ql/src/test/results/clientpositive/spark/orc_merge3.q.out
index 81a6013..2887a4b 100644
--- a/ql/src/test/results/clientpositive/spark/orc_merge3.q.out
+++ b/ql/src/test/results/clientpositive/spark/orc_merge3.q.out
@@ -107,7 +107,8 @@ STAGE PLANS:
               name: default.orcfile_merge3b
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-3
     Spark

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/orc_merge4.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/orc_merge4.q.out b/ql/src/test/results/clientpositive/spark/orc_merge4.q.out
index 8d433b0..fdc6012 100644
--- a/ql/src/test/results/clientpositive/spark/orc_merge4.q.out
+++ b/ql/src/test/results/clientpositive/spark/orc_merge4.q.out
@@ -125,7 +125,8 @@ STAGE PLANS:
               name: default.orcfile_merge3b
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-3
     Spark

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/orc_merge5.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/orc_merge5.q.out b/ql/src/test/results/clientpositive/spark/orc_merge5.q.out
index a7adcb4..c4006ad 100644
--- a/ql/src/test/results/clientpositive/spark/orc_merge5.q.out
+++ b/ql/src/test/results/clientpositive/spark/orc_merge5.q.out
@@ -69,7 +69,8 @@ STAGE PLANS:
               name: default.orc_merge5b
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: insert overwrite table orc_merge5b select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13
 PREHOOK: type: QUERY
@@ -166,7 +167,8 @@ STAGE PLANS:
               name: default.orc_merge5b
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-3
     Spark
@@ -287,7 +289,8 @@ STAGE PLANS:
               name: default.orc_merge5b
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: alter table orc_merge5b concatenate
 PREHOOK: type: ALTER_TABLE_MERGE

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/orc_merge6.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/orc_merge6.q.out b/ql/src/test/results/clientpositive/spark/orc_merge6.q.out
index 1028618..66d6dde 100644
--- a/ql/src/test/results/clientpositive/spark/orc_merge6.q.out
+++ b/ql/src/test/results/clientpositive/spark/orc_merge6.q.out
@@ -72,7 +72,8 @@ STAGE PLANS:
               name: default.orc_merge5a
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: insert overwrite table orc_merge5a partition (year="2000",hour=24) select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13
 PREHOOK: type: QUERY
@@ -214,7 +215,8 @@ STAGE PLANS:
               name: default.orc_merge5a
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-3
     Spark
@@ -422,7 +424,8 @@ STAGE PLANS:
               name: default.orc_merge5a
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: alter table orc_merge5a partition(year="2000",hour=24) concatenate
 PREHOOK: type: ALTER_PARTITION_MERGE

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/orc_merge7.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/orc_merge7.q.out b/ql/src/test/results/clientpositive/spark/orc_merge7.q.out
index 780f5c8..6fc8211 100644
--- a/ql/src/test/results/clientpositive/spark/orc_merge7.q.out
+++ b/ql/src/test/results/clientpositive/spark/orc_merge7.q.out
@@ -67,7 +67,8 @@ STAGE PLANS:
               name: default.orc_merge5a
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: insert overwrite table orc_merge5a partition (st) select userid,string1,subtype,decimal1,ts,subtype from orc_merge5
 PREHOOK: type: QUERY
@@ -243,7 +244,8 @@ STAGE PLANS:
               name: default.orc_merge5a
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-3
     Spark
@@ -528,7 +530,8 @@ STAGE PLANS:
               name: default.orc_merge5a
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: alter table orc_merge5a partition(st=80.0) concatenate
 PREHOOK: type: ALTER_PARTITION_MERGE

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/orc_merge_diff_fs.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/orc_merge_diff_fs.q.out b/ql/src/test/results/clientpositive/spark/orc_merge_diff_fs.q.out
index 1407616..e5dd9d1 100644
--- a/ql/src/test/results/clientpositive/spark/orc_merge_diff_fs.q.out
+++ b/ql/src/test/results/clientpositive/spark/orc_merge_diff_fs.q.out
@@ -92,7 +92,8 @@ STAGE PLANS:
               name: default.orcfile_merge1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: INSERT OVERWRITE TABLE orcfile_merge1 PARTITION (ds='1', part)
     SELECT key, value, PMOD(HASH(key), 2) as part
@@ -179,7 +180,8 @@ STAGE PLANS:
               name: default.orcfile_merge1b
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-3
     Spark
@@ -302,7 +304,8 @@ STAGE PLANS:
               name: default.orcfile_merge1c
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-3
     Spark

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/orc_merge_incompat1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/orc_merge_incompat1.q.out b/ql/src/test/results/clientpositive/spark/orc_merge_incompat1.q.out
index 01f8bd4..e142d61 100644
--- a/ql/src/test/results/clientpositive/spark/orc_merge_incompat1.q.out
+++ b/ql/src/test/results/clientpositive/spark/orc_merge_incompat1.q.out
@@ -68,7 +68,8 @@ STAGE PLANS:
               name: default.orc_merge5b
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: insert overwrite table orc_merge5b select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13
 PREHOOK: type: QUERY

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/orc_merge_incompat2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/orc_merge_incompat2.q.out b/ql/src/test/results/clientpositive/spark/orc_merge_incompat2.q.out
index 0f1db16..db77ce4 100644
--- a/ql/src/test/results/clientpositive/spark/orc_merge_incompat2.q.out
+++ b/ql/src/test/results/clientpositive/spark/orc_merge_incompat2.q.out
@@ -67,7 +67,8 @@ STAGE PLANS:
               name: default.orc_merge5a
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: insert overwrite table orc_merge5a partition (st) select userid,string1,subtype,decimal1,ts,subtype from orc_merge5 order by userid
 PREHOOK: type: QUERY
@@ -280,7 +281,8 @@ STAGE PLANS:
               name: default.orc_merge5a
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: alter table orc_merge5a partition(st=80.0) concatenate
 PREHOOK: type: ALTER_PARTITION_MERGE

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/parallel.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/parallel.q.out b/ql/src/test/results/clientpositive/spark/parallel.q.out
index e31fcf0..ecb8062 100644
--- a/ql/src/test/results/clientpositive/spark/parallel.q.out
+++ b/ql/src/test/results/clientpositive/spark/parallel.q.out
@@ -112,7 +112,8 @@ STAGE PLANS:
               name: default.src_a
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-1
     Move Operator
@@ -125,7 +126,8 @@ STAGE PLANS:
               name: default.src_b
 
   Stage: Stage-4
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: from (select key, value from src group by key, value) s
 insert overwrite table src_a select s.key, s.value group by s.key, s.value

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/parallel_join1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/parallel_join1.q.out b/ql/src/test/results/clientpositive/spark/parallel_join1.q.out
index 7fdd48d..db2962f 100644
--- a/ql/src/test/results/clientpositive/spark/parallel_join1.q.out
+++ b/ql/src/test/results/clientpositive/spark/parallel_join1.q.out
@@ -95,7 +95,8 @@ STAGE PLANS:
               name: default.dest_j1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM src src1 JOIN src src2 ON (src1.key = src2.key)
 INSERT OVERWRITE TABLE dest_j1 SELECT src1.key, src2.value

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/parallel_orderby.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/parallel_orderby.q.out b/ql/src/test/results/clientpositive/spark/parallel_orderby.q.out
index cb6c2b0..caa6835 100644
--- a/ql/src/test/results/clientpositive/spark/parallel_orderby.q.out
+++ b/ql/src/test/results/clientpositive/spark/parallel_orderby.q.out
@@ -85,7 +85,8 @@ STAGE PLANS:
           name: default.total_ordered
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: create table total_ordered as select * from src5 order by key, value
 PREHOOK: type: CREATETABLE_AS_SELECT
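
[Note: the CTAS path gets the same treatment as INSERT -- CREATE TABLE ... AS SELECT
plans now close with a "Stats Work / Basic Stats Work:" stage instead of
"Stats-Aggr Operator". A sketch reusing the query from this test, assuming src5
exists:

    EXPLAIN CREATE TABLE total_ordered AS
    SELECT * FROM src5 ORDER BY key, value;
]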

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/pcr.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/pcr.q.out b/ql/src/test/results/clientpositive/spark/pcr.q.out
index f373233..42ecd51 100644
--- a/ql/src/test/results/clientpositive/spark/pcr.q.out
+++ b/ql/src/test/results/clientpositive/spark/pcr.q.out
@@ -3677,7 +3677,8 @@ STAGE PLANS:
               name: default.pcr_t2
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
   Stage: Stage-1
@@ -3709,7 +3710,8 @@ STAGE PLANS:
               name: default.pcr_t3
 
   Stage: Stage-4
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
 PREHOOK: query: from pcr_t1
@@ -3922,7 +3924,8 @@ STAGE PLANS:
               name: default.pcr_t2
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
   Stage: Stage-1
@@ -3954,7 +3957,8 @@ STAGE PLANS:
               name: default.pcr_t3
 
   Stage: Stage-4
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
 PREHOOK: query: from pcr_t1

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/ppd_multi_insert.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/ppd_multi_insert.q.out b/ql/src/test/results/clientpositive/spark/ppd_multi_insert.q.out
index 38b1f6d..e30cf74 100644
--- a/ql/src/test/results/clientpositive/spark/ppd_multi_insert.q.out
+++ b/ql/src/test/results/clientpositive/spark/ppd_multi_insert.q.out
@@ -169,7 +169,8 @@ STAGE PLANS:
               name: default.mi1
 
   Stage: Stage-5
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-1
     Move Operator
@@ -182,7 +183,8 @@ STAGE PLANS:
               name: default.mi2
 
   Stage: Stage-6
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-2
     Move Operator
@@ -198,7 +200,8 @@ STAGE PLANS:
               name: default.mi3
 
   Stage: Stage-7
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-3
     Move Operator
@@ -1442,7 +1445,8 @@ STAGE PLANS:
               name: default.mi1
 
   Stage: Stage-5
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-1
     Move Operator
@@ -1455,7 +1459,8 @@ STAGE PLANS:
               name: default.mi2
 
   Stage: Stage-6
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-2
     Move Operator
@@ -1471,7 +1476,8 @@ STAGE PLANS:
               name: default.mi3
 
   Stage: Stage-7
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-3
     Move Operator

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/ptf.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/ptf.q.out b/ql/src/test/results/clientpositive/spark/ptf.q.out
index 82fc9f8..ea59983 100644
--- a/ql/src/test/results/clientpositive/spark/ptf.q.out
+++ b/ql/src/test/results/clientpositive/spark/ptf.q.out
@@ -3097,7 +3097,8 @@ STAGE PLANS:
               name: default.part_4
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-1
     Move Operator
@@ -3110,7 +3111,8 @@ STAGE PLANS:
               name: default.part_5
 
   Stage: Stage-4
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: from noop(on part 
 partition by p_mfgr 

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/reduce_deduplicate.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/reduce_deduplicate.q.out b/ql/src/test/results/clientpositive/spark/reduce_deduplicate.q.out
index 804ff02..bcf09d8 100644
--- a/ql/src/test/results/clientpositive/spark/reduce_deduplicate.q.out
+++ b/ql/src/test/results/clientpositive/spark/reduce_deduplicate.q.out
@@ -167,7 +167,8 @@ STAGE PLANS:
               name: default.bucket5_1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
 PREHOOK: query: insert overwrite table bucket5_1
@@ -375,6 +376,7 @@ STAGE PLANS:
               name: default.complex_tbl_1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/sample1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/sample1.q.out b/ql/src/test/results/clientpositive/spark/sample1.q.out
index ee9eb14..26d6148 100644
--- a/ql/src/test/results/clientpositive/spark/sample1.q.out
+++ b/ql/src/test/results/clientpositive/spark/sample1.q.out
@@ -155,7 +155,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
 PREHOOK: query: INSERT OVERWRITE TABLE dest1 SELECT s.*

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/sample2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/sample2.q.out b/ql/src/test/results/clientpositive/spark/sample2.q.out
index 85266d1..dff572e 100644
--- a/ql/src/test/results/clientpositive/spark/sample2.q.out
+++ b/ql/src/test/results/clientpositive/spark/sample2.q.out
@@ -154,7 +154,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
 PREHOOK: query: INSERT OVERWRITE TABLE dest1 SELECT s.* 

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/sample4.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/sample4.q.out b/ql/src/test/results/clientpositive/spark/sample4.q.out
index 69e7ee9..8c4640f 100644
--- a/ql/src/test/results/clientpositive/spark/sample4.q.out
+++ b/ql/src/test/results/clientpositive/spark/sample4.q.out
@@ -154,7 +154,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
 PREHOOK: query: INSERT OVERWRITE TABLE dest1 SELECT s.*

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/sample5.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/sample5.q.out b/ql/src/test/results/clientpositive/spark/sample5.q.out
index 558b2db..4af424c 100644
--- a/ql/src/test/results/clientpositive/spark/sample5.q.out
+++ b/ql/src/test/results/clientpositive/spark/sample5.q.out
@@ -155,7 +155,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
 PREHOOK: query: INSERT OVERWRITE TABLE dest1 SELECT s.* 

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/sample6.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/sample6.q.out b/ql/src/test/results/clientpositive/spark/sample6.q.out
index 4b35829..1b49bd3 100644
--- a/ql/src/test/results/clientpositive/spark/sample6.q.out
+++ b/ql/src/test/results/clientpositive/spark/sample6.q.out
@@ -154,7 +154,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
 PREHOOK: query: INSERT OVERWRITE TABLE dest1 SELECT s.*

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/sample7.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/sample7.q.out b/ql/src/test/results/clientpositive/spark/sample7.q.out
index eae33ca..98c2d92 100644
--- a/ql/src/test/results/clientpositive/spark/sample7.q.out
+++ b/ql/src/test/results/clientpositive/spark/sample7.q.out
@@ -155,7 +155,8 @@ STAGE PLANS:
               name: default.dest1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
 PREHOOK: query: INSERT OVERWRITE TABLE dest1 SELECT s.* 

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/skewjoin.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/skewjoin.q.out b/ql/src/test/results/clientpositive/spark/skewjoin.q.out
index fb6f08d..6ca00d7 100644
--- a/ql/src/test/results/clientpositive/spark/skewjoin.q.out
+++ b/ql/src/test/results/clientpositive/spark/skewjoin.q.out
@@ -209,7 +209,8 @@ STAGE PLANS:
               name: default.dest_j1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM src src1 JOIN src src2 ON (src1.key = src2.key)
 INSERT OVERWRITE TABLE dest_j1 SELECT src1.key, src2.value

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/skewjoin_noskew.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/skewjoin_noskew.q.out b/ql/src/test/results/clientpositive/spark/skewjoin_noskew.q.out
index 3f10ee5..a0adab1 100644
--- a/ql/src/test/results/clientpositive/spark/skewjoin_noskew.q.out
+++ b/ql/src/test/results/clientpositive/spark/skewjoin_noskew.q.out
@@ -163,7 +163,8 @@ STAGE PLANS:
           name: default.noskew
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: create table noskew as select a.* from src a join src b on a.key=b.key order by a.key limit 30
 PREHOOK: type: CREATETABLE_AS_SELECT

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/smb_mapjoin_1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/smb_mapjoin_1.q.out b/ql/src/test/results/clientpositive/spark/smb_mapjoin_1.q.out
index cf76508..775a7c4 100644
--- a/ql/src/test/results/clientpositive/spark/smb_mapjoin_1.q.out
+++ b/ql/src/test/results/clientpositive/spark/smb_mapjoin_1.q.out
@@ -46,6 +46,49 @@ POSTHOOK: query: load data local inpath '../../data/files/smbbucket_3.rc' overwr
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
 POSTHOOK: Output: default@smb_bucket_3
+PREHOOK: query: desc formatted smb_bucket_1
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@smb_bucket_1
+POSTHOOK: query: desc formatted smb_bucket_1
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@smb_bucket_1
+# col_name            	data_type           	comment             
+key                 	int                 	                    
+value               	string              	                    
+	 	 
+# Detailed Table Information	 	 
+Database:           	default             	 
+#### A masked pattern was here ####
+Retention:          	0                   	 
+#### A masked pattern was here ####
+Table Type:         	MANAGED_TABLE       	 
+Table Parameters:	 	 
+	SORTBUCKETCOLSPREFIX	TRUE                
+	numFiles            	1                   
+	numRows             	0                   
+	rawDataSize         	0                   
+	totalSize           	208                 
+#### A masked pattern was here ####
+	 	 
+# Storage Information	 	 
+SerDe Library:      	org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe	 
+InputFormat:        	org.apache.hadoop.hive.ql.io.RCFileInputFormat	 
+OutputFormat:       	org.apache.hadoop.hive.ql.io.RCFileOutputFormat	 
+Compressed:         	No                  	 
+Num Buckets:        	1                   	 
+Bucket Columns:     	[key]               	 
+Sort Columns:       	[Order(col:key, order:1)]	 
+Storage Desc Params:	 	 
+	serialization.format	1                   
+PREHOOK: query: select count(*) from smb_bucket_1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@smb_bucket_1
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from smb_bucket_1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@smb_bucket_1
+#### A masked pattern was here ####
+5
 PREHOOK: query: explain
 select /*+mapjoin(a)*/ * from smb_bucket_1 a join smb_bucket_2 b on a.key = b.key
 PREHOOK: type: QUERY
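
[Note: smb_mapjoin_1.q.out also gains a new "desc formatted" plus count(*) block,
pinning down the basic stats written for a bucketed table populated via LOAD DATA:
numFiles and totalSize are filled in, while numRows and rawDataSize stay 0 because
LOAD DATA moves files into place without scanning rows -- so the count(*) of 5
comes from an actual scan, not from stats. A sketch of how one would materialize
row counts for such a table afterwards:

    ANALYZE TABLE smb_bucket_1 COMPUTE STATISTICS;
    DESC FORMATTED smb_bucket_1;  -- numRows should now reflect the scanned row count
]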

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/smb_mapjoin_11.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/smb_mapjoin_11.q.out b/ql/src/test/results/clientpositive/spark/smb_mapjoin_11.q.out
index 9424361..054905d 100644
--- a/ql/src/test/results/clientpositive/spark/smb_mapjoin_11.q.out
+++ b/ql/src/test/results/clientpositive/spark/smb_mapjoin_11.q.out
@@ -213,7 +213,8 @@ STAGE PLANS:
               name: default.test_table3
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
 PREHOOK: query: INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') SELECT /*+ MAPJOIN(b) */ a.key, b.value FROM test_table1 a JOIN test_table2 b ON a.key = b.key AND a.ds = '1' AND b.ds = '1'

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/smb_mapjoin_12.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/smb_mapjoin_12.q.out b/ql/src/test/results/clientpositive/spark/smb_mapjoin_12.q.out
index d0bb917..6f1ab06 100644
--- a/ql/src/test/results/clientpositive/spark/smb_mapjoin_12.q.out
+++ b/ql/src/test/results/clientpositive/spark/smb_mapjoin_12.q.out
@@ -228,7 +228,8 @@ STAGE PLANS:
               name: default.test_table3
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
 PREHOOK: query: INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') SELECT /*+ MAPJOIN(b) */ a.key, b.value FROM test_table1 a JOIN test_table2 b ON a.key = b.key AND a.ds = '1' AND b.ds >= '1'
@@ -444,7 +445,8 @@ STAGE PLANS:
               name: default.test_table3
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
 PREHOOK: query: INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '2') 

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/smb_mapjoin_18.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/smb_mapjoin_18.q.out b/ql/src/test/results/clientpositive/spark/smb_mapjoin_18.q.out
index 6ed3c21..90d0e0e 100644
--- a/ql/src/test/results/clientpositive/spark/smb_mapjoin_18.q.out
+++ b/ql/src/test/results/clientpositive/spark/smb_mapjoin_18.q.out
@@ -79,7 +79,8 @@ STAGE PLANS:
               name: default.test_table2
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '1')
 SELECT a.key, a.value FROM test_table1 a WHERE a.ds = '1'
@@ -267,7 +268,8 @@ STAGE PLANS:
               name: default.test_table2
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '2')
 SELECT a.key, a.value FROM test_table1 a WHERE a.ds = '1' and a.key = 238
@@ -385,7 +387,8 @@ STAGE PLANS:
               name: default.test_table2
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '2')
 SELECT a.key, a.value FROM test_table2 a WHERE a.ds = '2'

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/smb_mapjoin_19.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/smb_mapjoin_19.q.out b/ql/src/test/results/clientpositive/spark/smb_mapjoin_19.q.out
index fb25015..71267d6 100644
--- a/ql/src/test/results/clientpositive/spark/smb_mapjoin_19.q.out
+++ b/ql/src/test/results/clientpositive/spark/smb_mapjoin_19.q.out
@@ -79,7 +79,8 @@ STAGE PLANS:
               name: default.test_table2
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '1')
 SELECT a.key, a.value FROM test_table1 a WHERE a.ds = '1'

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/smb_mapjoin_20.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/smb_mapjoin_20.q.out b/ql/src/test/results/clientpositive/spark/smb_mapjoin_20.q.out
index 6be9123..314c07c 100644
--- a/ql/src/test/results/clientpositive/spark/smb_mapjoin_20.q.out
+++ b/ql/src/test/results/clientpositive/spark/smb_mapjoin_20.q.out
@@ -93,7 +93,8 @@ STAGE PLANS:
               name: default.test_table2
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '1')
 SELECT a.key, a.value, a.value FROM test_table1 a WHERE a.ds = '1'
@@ -214,7 +215,8 @@ STAGE PLANS:
               name: default.test_table3
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1')
 SELECT a.value, a.key, a.value FROM test_table1 a WHERE a.ds = '1'
@@ -1382,5 +1384,6 @@ STAGE PLANS:
               name: default.test_table2
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/smb_mapjoin_21.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/smb_mapjoin_21.q.out b/ql/src/test/results/clientpositive/spark/smb_mapjoin_21.q.out
index 1a9118d..0a63bfb 100644
--- a/ql/src/test/results/clientpositive/spark/smb_mapjoin_21.q.out
+++ b/ql/src/test/results/clientpositive/spark/smb_mapjoin_21.q.out
@@ -79,7 +79,8 @@ STAGE PLANS:
               name: default.test_table2
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: drop table test_table2
 PREHOOK: type: DROPTABLE
@@ -162,7 +163,8 @@ STAGE PLANS:
               name: default.test_table2
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: drop table test_table2
 PREHOOK: type: DROPTABLE
@@ -244,7 +246,8 @@ STAGE PLANS:
               name: default.test_table2
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: drop table test_table2
 PREHOOK: type: DROPTABLE
@@ -327,7 +330,8 @@ STAGE PLANS:
               name: default.test_table2
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: drop table test_table2
 PREHOOK: type: DROPTABLE
@@ -410,7 +414,8 @@ STAGE PLANS:
               name: default.test_table2
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: drop table test_table2
 PREHOOK: type: DROPTABLE
@@ -492,5 +497,6 @@ STAGE PLANS:
               name: default.test_table2
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/smb_mapjoin_22.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/smb_mapjoin_22.q.out b/ql/src/test/results/clientpositive/spark/smb_mapjoin_22.q.out
index d8bdef2..e4681ba 100644
--- a/ql/src/test/results/clientpositive/spark/smb_mapjoin_22.q.out
+++ b/ql/src/test/results/clientpositive/spark/smb_mapjoin_22.q.out
@@ -75,7 +75,8 @@ STAGE PLANS:
               name: default.test_table2
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: INSERT OVERWRITE TABLE test_table2
 SELECT * FROM test_table1
@@ -218,7 +219,8 @@ STAGE PLANS:
               name: default.test_table2
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: INSERT OVERWRITE TABLE test_table2
 SELECT * FROM test_table1

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/smb_mapjoin_6.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/smb_mapjoin_6.q.out b/ql/src/test/results/clientpositive/spark/smb_mapjoin_6.q.out
index 7cf3cf7..745e221 100644
--- a/ql/src/test/results/clientpositive/spark/smb_mapjoin_6.q.out
+++ b/ql/src/test/results/clientpositive/spark/smb_mapjoin_6.q.out
@@ -140,7 +140,8 @@ STAGE PLANS:
               name: default.smb_join_results
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: insert overwrite table smb_join_results
 select /*+mapjoin(a)*/ * from smb_bucket4_1 a join smb_bucket4_2 b on a.key = b.key
@@ -1312,7 +1313,8 @@ STAGE PLANS:
               name: default.smb_join_results
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: insert overwrite table smb_join_results
 select /*+mapjoin(b)*/ * from smb_bucket4_1 a join smb_bucket4_2 b on a.key = b.key
@@ -2500,7 +2502,8 @@ STAGE PLANS:
               name: default.smb_join_results
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: insert overwrite table smb_join_results
 select /*+mapjoin(a)*/ * from smb_bucket4_1 a join smb_bucket4_2 b on a.key = b.key where a.key>1000
@@ -2604,7 +2607,8 @@ STAGE PLANS:
               name: default.smb_join_results
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: insert overwrite table smb_join_results
 select /*+mapjoin(b)*/ * from smb_bucket4_1 a join smb_bucket4_2 b on a.key = b.key where a.key>1000

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/smb_mapjoin_7.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/smb_mapjoin_7.q.out b/ql/src/test/results/clientpositive/spark/smb_mapjoin_7.q.out
index 622b950..e2f68a0 100644
--- a/ql/src/test/results/clientpositive/spark/smb_mapjoin_7.q.out
+++ b/ql/src/test/results/clientpositive/spark/smb_mapjoin_7.q.out
@@ -661,7 +661,8 @@ STAGE PLANS:
               name: default.smb_join_results
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: insert overwrite table smb_join_results
 select /*+mapjoin(a)*/ * from smb_bucket4_1 a full outer join smb_bucket4_2 b on a.key = b.key

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/spark_dynamic_partition_pruning.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/spark_dynamic_partition_pruning.q.out b/ql/src/test/results/clientpositive/spark/spark_dynamic_partition_pruning.q.out
index 7e97988..d7b445b 100644
--- a/ql/src/test/results/clientpositive/spark/spark_dynamic_partition_pruning.q.out
+++ b/ql/src/test/results/clientpositive/spark/spark_dynamic_partition_pruning.q.out
@@ -106,7 +106,8 @@ STAGE PLANS:
           name: default.srcpart_date
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: create table srcpart_date as select ds as ds, ds as `date` from srcpart group by ds
 PREHOOK: type: CREATETABLE_AS_SELECT

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/spark_explainuser_1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/spark_explainuser_1.q.out b/ql/src/test/results/clientpositive/spark/spark_explainuser_1.q.out
index 091728a..6a4bea1 100644
--- a/ql/src/test/results/clientpositive/spark/spark_explainuser_1.q.out
+++ b/ql/src/test/results/clientpositive/spark/spark_explainuser_1.q.out
@@ -45,7 +45,7 @@ POSTHOOK: type: QUERY
 Plan optimized by CBO.
 
 Stage-2
-  Stats-Aggr Operator
+  Stats Work{}
     Stage-0
       Move Operator
         table:{"name:":"default.src_orc_merge_test_part"}
@@ -78,7 +78,7 @@ Vertex dependency in root stage
 Reducer 2 <- Map 1 (GROUP)
 
 Stage-2
-  Stats-Aggr Operator
+  Stats Work{}
     Stage-0
       Move Operator
         table:{"name:":"default.src_orc_merge_test_part"}
@@ -3197,7 +3197,7 @@ Reducer 2 <- Map 1 (PARTITION-LEVEL SORT)
 Reducer 3 <- Reducer 2 (PARTITION-LEVEL SORT)
 
 Stage-2
-  Stats-Aggr Operator
+  Stats Work{}
     Stage-3
       Create Table Operator:
         name:default.nzhang_CTAS1
@@ -3247,7 +3247,7 @@ Reducer 2 <- Map 1 (PARTITION-LEVEL SORT)
 Reducer 3 <- Reducer 2 (PARTITION-LEVEL SORT)
 
 Stage-2
-  Stats-Aggr Operator
+  Stats Work{}
     Stage-3
       Create Table Operator:
         name:default.nzhang_ctas3
@@ -4809,7 +4809,7 @@ Reducer 6 <- Map 1 (PARTITION-LEVEL SORT)
 Reducer 7 <- Map 1 (PARTITION-LEVEL SORT)
 
 Stage-3
-  Stats-Aggr Operator
+  Stats Work{}
     Stage-0
       Move Operator
         table:{"name:":"default.part_4"}
@@ -4862,7 +4862,7 @@ Stage-3
                                 Output:["_col1","_col2","_col5","_col7"]
                               <- Please refer to the previous Map 1 [PARTITION-LEVEL SORT]
 Stage-4
-  Stats-Aggr Operator
+  Stats Work{}
     Stage-1
       Move Operator
         table:{"name:":"default.part_5"}
@@ -5227,7 +5227,7 @@ Vertex dependency in root stage
 Reducer 2 <- Map 1 (PARTITION-LEVEL SORT), Map 3 (PARTITION-LEVEL SORT)
 
 Stage-2
-  Stats-Aggr Operator
+  Stats Work{}
     Stage-0(CONDITIONAL)
       Move Operator
         table:{"name:":"default.dest_j1"}

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/spark_multi_insert_parallel_orderby.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/spark_multi_insert_parallel_orderby.q.out b/ql/src/test/results/clientpositive/spark/spark_multi_insert_parallel_orderby.q.out
index 371e756..a5068f8 100644
--- a/ql/src/test/results/clientpositive/spark/spark_multi_insert_parallel_orderby.q.out
+++ b/ql/src/test/results/clientpositive/spark/spark_multi_insert_parallel_orderby.q.out
@@ -169,7 +169,8 @@ STAGE PLANS:
               name: default.e1
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-1
     Move Operator
@@ -182,7 +183,8 @@ STAGE PLANS:
               name: default.e2
 
   Stage: Stage-4
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM (select key,value from src order by key limit 10) a
 INSERT OVERWRITE TABLE e1
@@ -306,7 +308,8 @@ STAGE PLANS:
               name: default.e1
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-1
     Move Operator
@@ -319,7 +322,8 @@ STAGE PLANS:
               name: default.e2
 
   Stage: Stage-4
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM (select key,value from src order by key) a
 INSERT OVERWRITE TABLE e1 
@@ -1442,7 +1446,8 @@ STAGE PLANS:
               name: default.e1
 
   Stage: Stage-3
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
   Stage: Stage-1
     Move Operator
@@ -1455,7 +1460,8 @@ STAGE PLANS:
               name: default.e2
 
   Stage: Stage-4
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM (select key,value from src order by key) a
 INSERT OVERWRITE TABLE e1 

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/spark_use_ts_stats_for_mapjoin.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/spark_use_ts_stats_for_mapjoin.q.out b/ql/src/test/results/clientpositive/spark/spark_use_ts_stats_for_mapjoin.q.out
index 0a3ba5f..d9ea36f 100644
--- a/ql/src/test/results/clientpositive/spark/spark_use_ts_stats_for_mapjoin.q.out
+++ b/ql/src/test/results/clientpositive/spark/spark_use_ts_stats_for_mapjoin.q.out
@@ -230,7 +230,8 @@ STAGE PLANS:
               name: default.dest
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: FROM src src1 JOIN src src2 ON (src1.key = src2.key) JOIN src src3 ON (src1.key + src2.key = src3.key)
 INSERT OVERWRITE TABLE dest SELECT src1.key, src3.value

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/spark_vectorized_dynamic_partition_pruning.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/spark_vectorized_dynamic_partition_pruning.q.out b/ql/src/test/results/clientpositive/spark/spark_vectorized_dynamic_partition_pruning.q.out
index 3028249..1a8e9ff 100644
--- a/ql/src/test/results/clientpositive/spark/spark_vectorized_dynamic_partition_pruning.q.out
+++ b/ql/src/test/results/clientpositive/spark/spark_vectorized_dynamic_partition_pruning.q.out
@@ -175,7 +175,8 @@ STAGE PLANS:
           name: default.srcpart_date
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: create table srcpart_date stored as orc as select ds as ds, ds as `date` from srcpart group by ds
 PREHOOK: type: CREATETABLE_AS_SELECT

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/stats0.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/stats0.q.out b/ql/src/test/results/clientpositive/spark/stats0.q.out
index 66dfa63..0fae83b 100644
--- a/ql/src/test/results/clientpositive/spark/stats0.q.out
+++ b/ql/src/test/results/clientpositive/spark/stats0.q.out
@@ -147,7 +147,8 @@ STAGE PLANS:
               name: default.stats_non_partitioned
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
 PREHOOK: query: insert overwrite table stats_non_partitioned
@@ -737,7 +738,8 @@ STAGE PLANS:
               name: default.stats_partitioned
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: insert overwrite table stats_partitioned partition (ds='1')
 select * from src
@@ -1463,7 +1465,8 @@ STAGE PLANS:
               name: default.stats_non_partitioned
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 #### A masked pattern was here ####
 
 PREHOOK: query: insert overwrite table stats_non_partitioned
@@ -2053,7 +2056,8 @@ STAGE PLANS:
               name: default.stats_partitioned
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: insert overwrite table stats_partitioned partition (ds='1')
 select * from src

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/stats1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/stats1.q.out b/ql/src/test/results/clientpositive/spark/stats1.q.out
index cdabdce..29777f9 100644
--- a/ql/src/test/results/clientpositive/spark/stats1.q.out
+++ b/ql/src/test/results/clientpositive/spark/stats1.q.out
@@ -96,7 +96,8 @@ STAGE PLANS:
               name: default.tmptable
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: INSERT OVERWRITE TABLE tmptable
 SELECT unionsrc.key, unionsrc.value 

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/stats10.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/stats10.q.out b/ql/src/test/results/clientpositive/spark/stats10.q.out
index 865a3b8..d79fc10 100644
--- a/ql/src/test/results/clientpositive/spark/stats10.q.out
+++ b/ql/src/test/results/clientpositive/spark/stats10.q.out
@@ -68,7 +68,8 @@ STAGE PLANS:
               name: default.bucket3_1
 
   Stage: Stage-2
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: insert overwrite table bucket3_1 partition (ds='1')
 select * from src
@@ -383,7 +384,8 @@ STAGE PLANS:
                   Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
 
   Stage: Stage-1
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: analyze table bucket3_1 partition (ds) compute statistics
 PREHOOK: type: QUERY

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/stats12.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/stats12.q.out b/ql/src/test/results/clientpositive/spark/stats12.q.out
index 9c96819..484a045 100644
--- a/ql/src/test/results/clientpositive/spark/stats12.q.out
+++ b/ql/src/test/results/clientpositive/spark/stats12.q.out
@@ -153,8 +153,9 @@ STAGE PLANS:
               /analyze_srcpart/ds=2008-04-08/hr=12 [analyze_srcpart]
 
   Stage: Stage-1
-    Stats-Aggr Operator
-      Stats Aggregation Key Prefix: default.analyze_srcpart/
+    Stats Work
+      Basic Stats Work:
+          Stats Aggregation Key Prefix: default.analyze_srcpart/
 
 PREHOOK: query: analyze table analyze_srcpart PARTITION(ds='2008-04-08',hr) compute statistics
 PREHOOK: type: QUERY

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/stats13.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/stats13.q.out b/ql/src/test/results/clientpositive/spark/stats13.q.out
index 9e4302b..c410898 100644
--- a/ql/src/test/results/clientpositive/spark/stats13.q.out
+++ b/ql/src/test/results/clientpositive/spark/stats13.q.out
@@ -107,8 +107,9 @@ STAGE PLANS:
               /analyze_srcpart/ds=2008-04-08/hr=11 [analyze_srcpart]
 
   Stage: Stage-1
-    Stats-Aggr Operator
-      Stats Aggregation Key Prefix: default.analyze_srcpart/
+    Stats Work
+      Basic Stats Work:
+          Stats Aggregation Key Prefix: default.analyze_srcpart/
 
 PREHOOK: query: analyze table analyze_srcpart PARTITION(ds='2008-04-08',hr=11) compute statistics
 PREHOOK: type: QUERY

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/stats2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/stats2.q.out b/ql/src/test/results/clientpositive/spark/stats2.q.out
index 74e8678..f609702 100644
--- a/ql/src/test/results/clientpositive/spark/stats2.q.out
+++ b/ql/src/test/results/clientpositive/spark/stats2.q.out
@@ -138,7 +138,8 @@ STAGE PLANS:
                   Statistics: Num rows: 1 Data size: 23248 Basic stats: PARTIAL Column stats: NONE
 
   Stage: Stage-1
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: analyze table analyze_t1 partition (ds, hr) compute statistics
 PREHOOK: type: QUERY

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/stats3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/stats3.q.out b/ql/src/test/results/clientpositive/spark/stats3.q.out
index 7d4ed9b..212ddcc 100644
--- a/ql/src/test/results/clientpositive/spark/stats3.q.out
+++ b/ql/src/test/results/clientpositive/spark/stats3.q.out
@@ -54,7 +54,8 @@ STAGE PLANS:
               name: default.hive_test_src
 
   Stage: Stage-1
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: load data local inpath '../../data/files/test.dat' overwrite into table hive_test_src
 PREHOOK: type: LOAD

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/stats5.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/stats5.q.out b/ql/src/test/results/clientpositive/spark/stats5.q.out
index 6684abe..1204200 100644
--- a/ql/src/test/results/clientpositive/spark/stats5.q.out
+++ b/ql/src/test/results/clientpositive/spark/stats5.q.out
@@ -30,7 +30,8 @@ STAGE PLANS:
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE
 
   Stage: Stage-1
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: analyze table analyze_src compute statistics
 PREHOOK: type: QUERY

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/results/clientpositive/spark/stats7.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/stats7.q.out b/ql/src/test/results/clientpositive/spark/stats7.q.out
index 9c5a71c..13ca968 100644
--- a/ql/src/test/results/clientpositive/spark/stats7.q.out
+++ b/ql/src/test/results/clientpositive/spark/stats7.q.out
@@ -53,7 +53,8 @@ STAGE PLANS:
                   Statistics: Num rows: 1 Data size: 23248 Basic stats: PARTIAL Column stats: NONE
 
   Stage: Stage-1
-    Stats-Aggr Operator
+    Stats Work
+      Basic Stats Work:
 
 PREHOOK: query: analyze table analyze_srcpart PARTITION(ds='2008-04-08',hr) compute statistics
 PREHOOK: type: QUERY


[19/22] hive git commit: HIVE-16827 : Merge stats task and column stats task into a single task (Zoltan Haindrich via Ashutosh Chauhan)

Posted by ha...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/java/org/apache/hadoop/hive/ql/plan/StatsWork.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/StatsWork.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/StatsWork.java
index 74629d5..b2bd465 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/plan/StatsWork.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/StatsWork.java
@@ -19,136 +19,153 @@
 package org.apache.hadoop.hive.ql.plan;
 
 import java.io.Serializable;
+import java.util.HashSet;
+import java.util.Set;
 
+import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
 import org.apache.hadoop.hive.ql.exec.Task;
-import org.apache.hadoop.hive.ql.parse.BaseSemanticAnalyzer.TableSpec;
+import org.apache.hadoop.hive.ql.metadata.Partition;
+import org.apache.hadoop.hive.ql.metadata.Table;
 import org.apache.hadoop.hive.ql.plan.Explain.Level;
-
+import org.apache.hadoop.hive.ql.session.SessionState;
 
 /**
- * ConditionalStats.
+ * Stats work; may include basic stats work and a column stats descriptor.
  *
  */
-@Explain(displayName = "Stats-Aggr Operator", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED })
+@Explain(displayName = "Stats Work", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED })
 public class StatsWork implements Serializable {
-  private static final long serialVersionUID = 1L;
-
-  private TableSpec tableSpecs;         // source table spec -- for TableScanOperator
-  private LoadTableDesc loadTableDesc;  // same as MoveWork.loadTableDesc -- for FileSinkOperator
-  private LoadFileDesc loadFileDesc;    // same as MoveWork.loadFileDesc -- for FileSinkOperator
-  private String aggKey;                // aggregation key prefix
-  private boolean statsReliable;        // are stats completely reliable
-
-  // If stats aggregator is not present, clear the current aggregator stats.
-  // For eg. if a merge is being performed, stats already collected by aggregator (numrows etc.)
-  // are still valid. However, if a load file is being performed, the old stats collected by
-  // aggregator are not valid. It might be a good idea to clear them instead of leaving wrong
-  // and old stats.
-  // Since HIVE-12661, we maintain the old stats (although may be wrong) for CBO
-  // purpose. We use a flag COLUMN_STATS_ACCURATE to
-  // show the accuracy of the stats.
 
-  private boolean clearAggregatorStats = false;
-
-  private boolean noStatsAggregator = false;
+  private static final long serialVersionUID = 1L;
+  // this is for basic stats
+  private BasicStatsWork basicStatsWork;
+  private BasicStatsNoJobWork basicStatsNoJobWork;
+  private ColumnStatsDesc colStats;
+  private static final int LIMIT = -1;
 
-  private boolean isNoScanAnalyzeCommand = false;
+  private String currentDatabase;
+  private boolean statsReliable;
+  private Table table;
+  private boolean truncate;
+  private boolean footerScan;
+  private Set<Partition> partitions = new HashSet<>();
 
-  // sourceTask for TS is not changed (currently) but that of FS might be changed
-  // by various optimizers (auto.convert.join, for example)
-  // so this is set by DriverContext in runtime
-  private transient Task sourceTask;
+  public StatsWork(Table table, BasicStatsWork basicStatsWork, HiveConf hconf) {
+    super();
+    this.table = table;
+    this.basicStatsWork = basicStatsWork;
+    this.currentDatabase = SessionState.get().getCurrentDatabase();
+    statsReliable = hconf.getBoolVar(ConfVars.HIVE_STATS_RELIABLE);
+    basicStatsWork.setStatsReliable(statsReliable);
+  }
 
-  // used by FS based stats collector
-  private String statsTmpDir;
+  public StatsWork(Table table, HiveConf hconf) {
+    super();
+    this.table = table;
+    this.currentDatabase = SessionState.get().getCurrentDatabase();
+    statsReliable = hconf.getBoolVar(ConfVars.HIVE_STATS_RELIABLE);
+  }
 
-  public StatsWork() {
+  @Override
+  public String toString() {
+    return String.format("StatWork; fetch: %s", getfWork());
   }
 
-  public StatsWork(TableSpec tableSpecs) {
-    this.tableSpecs = tableSpecs;
+  FetchWork getfWork() {
+    return colStats == null ? null : colStats.getFWork();
   }
 
-  public StatsWork(LoadTableDesc loadTableDesc) {
-    this.loadTableDesc = loadTableDesc;
+  @Explain(displayName = "Column Stats Desc")
+  public ColumnStatsDesc getColStats() {
+    return colStats;
   }
 
-  public StatsWork(LoadFileDesc loadFileDesc) {
-    this.loadFileDesc = loadFileDesc;
+  public void setColStats(ColumnStatsDesc colStats) {
+    this.colStats = colStats;
   }
 
-  public TableSpec getTableSpecs() {
-    return tableSpecs;
+  // unused; kept for an unknown reason
+  @Deprecated
+  public static int getLimit() {
+    return LIMIT;
   }
 
-  public LoadTableDesc getLoadTableDesc() {
-    return loadTableDesc;
+  @Explain(displayName = "Basic Stats Work", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED })
+  public BasicStatsWork getBasicStatsWork() {
+    return basicStatsWork;
   }
 
-  public LoadFileDesc getLoadFileDesc() {
-    return loadFileDesc;
+  // only explain uses it
+  @Explain(displayName = "Basic Stats NoJob Work", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED })
+  public BasicStatsNoJobWork getBasicStatsNoJobWork() {
+    return basicStatsNoJobWork;
   }
 
-  public void setAggKey(String aggK) {
-    aggKey = aggK;
+  public void setSourceTask(Task<?> sourceTask) {
+    basicStatsWork.setSourceTask(sourceTask);
   }
 
-  @Explain(displayName = "Stats Aggregation Key Prefix", explainLevels = { Level.EXTENDED })
-  public String getAggKey() {
-    return aggKey;
+  public String getCurrentDatabaseName() {
+    return currentDatabase;
   }
 
-  public String getStatsTmpDir() {
-    return statsTmpDir;
+  public boolean hasColStats() {
+    return colStats != null;
   }
 
-  public void setStatsTmpDir(String statsTmpDir) {
-    this.statsTmpDir = statsTmpDir;
+  public Table getTable() {
+    return table;
   }
 
-  public boolean getNoStatsAggregator() {
-    return noStatsAggregator;
+  public void collectStatsFromAggregator(IStatsGatherDesc conf) {
+    // AggKey in StatsWork is used for stats aggregation while StatsAggPrefix
+    // in FileSinkDesc is used for stats publishing. They should be consistent.
+    basicStatsWork.setAggKey(conf.getStatsAggPrefix());
+    basicStatsWork.setStatsTmpDir(conf.getTmpStatsDir());
+    basicStatsWork.setStatsReliable(statsReliable);
   }
 
-  public void setNoStatsAggregator(boolean noStatsAggregator) {
-    this.noStatsAggregator = noStatsAggregator;
+  public void truncateExisting(boolean truncate) {
+    this.truncate = truncate;
   }
 
-  public boolean isStatsReliable() {
-    return statsReliable;
+
+  public void setFooterScan() {
+    basicStatsNoJobWork = new BasicStatsNoJobWork(table.getTableSpec());
+    basicStatsNoJobWork.setStatsReliable(getStatsReliable());
+    footerScan = true;
   }
 
-  public void setStatsReliable(boolean statsReliable) {
-    this.statsReliable = statsReliable;
+  public void addInputPartitions(Set<Partition> partitions) {
+    this.partitions.addAll(partitions);
   }
 
-  public boolean isClearAggregatorStats() {
-    return clearAggregatorStats;
+  public Set<Partition> getPartitions() {
+    return partitions;
   }
 
-  public void setClearAggregatorStats(boolean clearAggregatorStats) {
-    this.clearAggregatorStats = clearAggregatorStats;
+  public boolean isFooterScan() {
+    return footerScan;
   }
 
-  /**
-   * @return the isNoScanAnalyzeCommand
-   */
-  public boolean isNoScanAnalyzeCommand() {
-    return isNoScanAnalyzeCommand;
+  public boolean getStatsReliable() {
+    return statsReliable;
   }
 
-  /**
-   * @param isNoScanAnalyzeCommand the isNoScanAnalyzeCommand to set
-   */
-  public void setNoScanAnalyzeCommand(boolean isNoScanAnalyzeCommand) {
-    this.isNoScanAnalyzeCommand = isNoScanAnalyzeCommand;
+  public String getFullTableName() {
+    return table.getDbName() + "." + table.getTableName();
   }
 
   public Task getSourceTask() {
-    return sourceTask;
+    return basicStatsWork == null ? null : basicStatsWork.getSourceTask();
+  }
+
+  public String getAggKey() {
+    return basicStatsWork.getAggKey();
   }
 
-  public void setSourceTask(Task sourceTask) {
-    this.sourceTask = sourceTask;
+  public boolean isAggregating() {
+    return basicStatsWork != null && basicStatsWork.getAggKey() != null;
   }
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/java/org/apache/hadoop/hive/ql/plan/TableScanDesc.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/TableScanDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/TableScanDesc.java
index 75d0f43..237c8cf 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/plan/TableScanDesc.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/TableScanDesc.java
@@ -43,7 +43,7 @@ import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
  * things will be added here as table scan is invoked as part of local work.
  **/
 @Explain(displayName = "TableScan", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED })
-public class TableScanDesc extends AbstractOperatorDesc {
+public class TableScanDesc extends AbstractOperatorDesc implements IStatsGatherDesc {
   private static final long serialVersionUID = 1L;
 
   private String alias;
@@ -263,11 +263,13 @@ public class TableScanDesc extends AbstractOperatorDesc {
     this.gatherStats = gatherStats;
   }
 
+  @Override
   @Explain(displayName = "GatherStats", explainLevels = { Level.EXTENDED })
   public boolean isGatherStats() {
     return gatherStats;
   }
 
+  @Override
   public String getTmpStatsDir() {
     return tmpStatsDir;
   }
@@ -296,6 +298,7 @@ public class TableScanDesc extends AbstractOperatorDesc {
     statsAggKeyPrefix = k;
   }
 
+  @Override
   @Explain(displayName = "Statistics Aggregation Key Prefix", explainLevels = { Level.EXTENDED })
   public String getStatsAggPrefix() {
     return statsAggKeyPrefix;
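
The only change above is that TableScanDesc now implements IStatsGatherDesc, with @Override added to three existing getters. The shape of the new interface can be inferred from those overrides plus the collectStatsFromAggregator(IStatsGatherDesc) caller in StatsWork; a sketch of that inferred contract follows (the real interface may declare more members):

    package org.apache.hadoop.hive.ql.plan;

    // Inferred contract for plan descriptors that gather stats: whether
    // stats gathering is on, where temporary stats are written, and the
    // key prefix used to publish and later aggregate them.
    public interface IStatsGatherDesc {
      boolean isGatherStats();
      String getTmpStatsDir();
      String getStatsAggPrefix();
    }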

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/java/org/apache/hadoop/hive/ql/stats/BasicStatsNoJobTask.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/stats/BasicStatsNoJobTask.java b/ql/src/java/org/apache/hadoop/hive/ql/stats/BasicStatsNoJobTask.java
new file mode 100644
index 0000000..d1f7652
--- /dev/null
+++ b/ql/src/java/org/apache/hadoop/hive/ql/stats/BasicStatsNoJobTask.java
@@ -0,0 +1,385 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.ql.stats;
+
+import java.io.IOException;
+import java.util.Collection;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.TimeUnit;
+
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hive.common.HiveStatsUtils;
+import org.apache.hadoop.hive.common.StatsSetupConst;
+import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.metastore.api.EnvironmentContext;
+import org.apache.hadoop.hive.metastore.api.InvalidOperationException;
+import org.apache.hadoop.hive.ql.CompilationOpContext;
+import org.apache.hadoop.hive.ql.exec.StatsTask;
+import org.apache.hadoop.hive.ql.io.StatsProvidingRecordReader;
+import org.apache.hadoop.hive.ql.metadata.Hive;
+import org.apache.hadoop.hive.ql.metadata.HiveException;
+import org.apache.hadoop.hive.ql.metadata.Partition;
+import org.apache.hadoop.hive.ql.metadata.Table;
+import org.apache.hadoop.hive.ql.parse.BaseSemanticAnalyzer.TableSpec;
+import org.apache.hadoop.hive.ql.plan.BasicStatsNoJobWork;
+import org.apache.hadoop.hive.ql.plan.api.StageType;
+import org.apache.hadoop.hive.ql.session.SessionState.LogHelper;
+import org.apache.hadoop.mapred.FileSplit;
+import org.apache.hadoop.mapred.InputFormat;
+import org.apache.hadoop.mapred.InputSplit;
+import org.apache.hadoop.mapred.JobConf;
+import org.apache.hadoop.mapred.Reporter;
+import org.apache.hadoop.util.StringUtils;
+import org.apache.hive.common.util.ReflectionUtil;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.common.base.Function;
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.ImmutableListMultimap;
+import com.google.common.collect.Lists;
+import com.google.common.collect.Multimaps;
+
+/**
+ * BasicStatsNoJobTask is used when stats collection is the only task for the given query (there is
+ * no parent MR or Tez job), e.g. ANALYZE with noscan on
+ * file formats that implement the StatsProvidingRecordReader interface: the ORC format (which
+ * implements it) stores statistics for all columns in the file footer. It is much
+ * faster to compute table/partition statistics by reading the footer than by scanning all the
+ * rows. This task can be used to compute basic stats such as numFiles, numRows, fileSize and
+ * rawDataSize from the ORC footer.
+ **/
+public class BasicStatsNoJobTask implements IStatsProcessor {
+
+  private static transient final Logger LOG = LoggerFactory.getLogger(BasicStatsNoJobTask.class);
+  private HiveConf conf;
+
+  private BasicStatsNoJobWork work;
+  private LogHelper console;
+
+  public BasicStatsNoJobTask(HiveConf conf, BasicStatsNoJobWork work) {
+    this.conf = conf;
+    this.work = work;
+    console = new LogHelper(LOG);
+  }
+
+
+  @Override
+  public void initialize(CompilationOpContext opContext) {
+
+  }
+
+  @Override
+  public int process(Hive db, Table tbl) throws Exception {
+
+    LOG.info("Executing stats (no job) task");
+
+    ExecutorService threadPool = StatsTask.newThreadPool(conf);
+
+    return aggregateStats(threadPool, db);
+  }
+
+  public StageType getType() {
+    return StageType.STATS;
+  }
+
+  public String getName() {
+    return "STATS-NO-JOB";
+  }
+
+  static class StatItem {
+    Partish partish;
+    Map<String, String> params;
+    Object result;
+  }
+
+  static class FooterStatCollector implements Runnable {
+
+    private Partish partish;
+    private Object result;
+    private JobConf jc;
+    private Path dir;
+    private FileSystem fs;
+    private LogHelper console;
+
+    public FooterStatCollector(JobConf jc, Partish partish) {
+      this.jc = jc;
+      this.partish = partish;
+    }
+
+    public static final Function<FooterStatCollector, String> SIMPLE_NAME_FUNCTION = new Function<FooterStatCollector, String>() {
+
+      @Override
+      public String apply(FooterStatCollector sc) {
+        return String.format("%s#%s", sc.partish.getTable().getCompleteName(), sc.partish.getPartishType());
+      }
+    };
+    private static final Function<FooterStatCollector, Partition> EXTRACT_RESULT_FUNCTION = new Function<FooterStatCollector, Partition>() {
+      @Override
+      public Partition apply(FooterStatCollector input) {
+        return (Partition) input.result;
+      }
+    };
+
+    private boolean isValid() {
+      return result != null;
+    }
+
+    public void init(HiveConf conf, LogHelper console) throws IOException {
+      this.console = console;
+      dir = new Path(partish.getPartSd().getLocation());
+      fs = dir.getFileSystem(conf);
+    }
+
+    @Override
+    public void run() {
+
+      Map<String, String> parameters = partish.getPartParameters();
+      try {
+        long numRows = 0;
+        long rawDataSize = 0;
+        long fileSize = 0;
+        long numFiles = 0;
+        LOG.debug("Aggregating stats for {}", dir);
+        FileStatus[] fileList = HiveStatsUtils.getFileStatusRecurse(dir, -1, fs);
+
+        for (FileStatus file : fileList) {
+          LOG.debug("Computing stats for {}", file);
+          if (!file.isDirectory()) {
+            InputFormat<?, ?> inputFormat = ReflectionUtil.newInstance(partish.getInputFormatClass(), jc);
+            InputSplit dummySplit = new FileSplit(file.getPath(), 0, 0, new String[] { partish.getLocation() });
+            if (file.getLen() == 0) {
+              numFiles += 1;
+            } else {
+              org.apache.hadoop.mapred.RecordReader<?, ?> recordReader = inputFormat.getRecordReader(dummySplit, jc, Reporter.NULL);
+              try {
+                if (recordReader instanceof StatsProvidingRecordReader) {
+                  StatsProvidingRecordReader statsRR;
+                  statsRR = (StatsProvidingRecordReader) recordReader;
+                  rawDataSize += statsRR.getStats().getRawDataSize();
+                  numRows += statsRR.getStats().getRowCount();
+                  fileSize += file.getLen();
+                  numFiles += 1;
+                } else {
+                  throw new HiveException(String.format("Unexpected file found during reading footers for: %s ", file));
+                }
+              } finally {
+                recordReader.close();
+              }
+            }
+          }
+        }
+
+        StatsSetupConst.setBasicStatsState(parameters, StatsSetupConst.TRUE);
+
+        parameters.put(StatsSetupConst.ROW_COUNT, String.valueOf(numRows));
+        parameters.put(StatsSetupConst.RAW_DATA_SIZE, String.valueOf(rawDataSize));
+        parameters.put(StatsSetupConst.TOTAL_SIZE, String.valueOf(fileSize));
+        parameters.put(StatsSetupConst.NUM_FILES, String.valueOf(numFiles));
+
+        if (partish.getPartition() != null) {
+          result = new Partition(partish.getTable(), partish.getPartition().getTPartition());
+        } else {
+          result = new Table(partish.getTable().getTTable());
+        }
+
+        String msg = partish.getSimpleName() + " stats: [" + toString(parameters) + ']';
+        LOG.debug(msg);
+        console.printInfo(msg);
+
+      } catch (Exception e) {
+        console.printInfo("[Warning] could not update stats for " + partish.getSimpleName() + ".", "Failed with exception " + e.getMessage() + "\n" + StringUtils.stringifyException(e));
+      }
+    }
+
+    private String toString(Map<String, String> parameters) {
+      StringBuilder builder = new StringBuilder();
+      for (String statType : StatsSetupConst.supportedStats) {
+        String value = parameters.get(statType);
+        if (value != null) {
+          if (builder.length() > 0) {
+            builder.append(", ");
+          }
+          builder.append(statType).append('=').append(value);
+        }
+      }
+      return builder.toString();
+    }
+
+  }
+
+  private int aggregateStats(ExecutorService threadPool, Hive db) {
+    int ret = 0;
+    try {
+      JobConf jc = new JobConf(conf);
+
+      TableSpec tableSpecs = work.getTableSpecs();
+
+      if (tableSpecs == null) {
+        throw new RuntimeException("this is unexpected...needs some investigation");
+      }
+
+      Table table = tableSpecs.tableHandle;
+
+      Collection<Partition> partitions = null;
+      if (work.getPartitions() == null || work.getPartitions().isEmpty()) {
+        if (table.isPartitioned()) {
+          partitions = tableSpecs.partitions;
+        }
+      } else {
+        partitions = work.getPartitions();
+      }
+
+      LinkedList<Partish> partishes = Lists.newLinkedList();
+      if (partitions == null) {
+        partishes.add(Partish.buildFor(table));
+      } else {
+        for (Partition part : partitions) {
+          partishes.add(Partish.buildFor(table, part));
+        }
+      }
+
+      List<FooterStatCollector> scs = Lists.newArrayList();
+      for (Partish partish : partishes) {
+        scs.add(new FooterStatCollector(jc, partish));
+      }
+
+      for (FooterStatCollector sc : scs) {
+        sc.init(conf, console);
+        threadPool.execute(sc);
+      }
+
+      LOG.debug("Stats collection waiting for threadpool to shutdown..");
+      shutdownAndAwaitTermination(threadPool);
+      LOG.debug("Stats collection threadpool shutdown successful.");
+
+      ret = updatePartitions(db, scs, table);
+
+    } catch (Exception e) {
+      console.printError("Failed to collect footer statistics.", "Failed with exception " + e.getMessage() + "\n" + StringUtils.stringifyException(e));
+      // Fail the query if the stats are supposed to be reliable
+      if (work.isStatsReliable()) {
+        ret = -1;
+      }
+    }
+
+    // The return value of 0 indicates success,
+    // anything else indicates failure
+    return ret;
+  }
+
+  private int updatePartitions(Hive db, List<FooterStatCollector> scs, Table table) throws InvalidOperationException, HiveException {
+
+    String tableFullName = table.getFullyQualifiedName();
+
+    if (scs.isEmpty()) {
+      return 0;
+    }
+    if (work.isStatsReliable()) {
+      for (FooterStatCollector statsCollection : scs) {
+        if (statsCollection.result == null) {
+          LOG.debug("Stats requested to be reliable. Empty stats found: {}", statsCollection.partish.getSimpleName());
+          return -1;
+        }
+      }
+    }
+    List<FooterStatCollector> validCollectors = Lists.newArrayList();
+    for (FooterStatCollector statsCollection : scs) {
+      if (statsCollection.isValid()) {
+        validCollectors.add(statsCollection);
+      }
+    }
+
+    EnvironmentContext environmentContext = new EnvironmentContext();
+    environmentContext.putToProperties(StatsSetupConst.DO_NOT_UPDATE_STATS, StatsSetupConst.TRUE);
+
+    ImmutableListMultimap<String, FooterStatCollector> collectorsByTable = Multimaps.index(validCollectors, FooterStatCollector.SIMPLE_NAME_FUNCTION);
+
+    LOG.debug("Collectors.size(): {}", collectorsByTable.keySet());
+
+    if (collectorsByTable.keySet().size() < 1) {
+      LOG.warn("Collectors are empty! ; {}", tableFullName);
+    }
+
+    // for now this should be true...
+    assert (collectorsByTable.keySet().size() <= 1);
+
+    LOG.debug("Updating stats for: {}", tableFullName);
+
+    for (String partName : collectorsByTable.keySet()) {
+      ImmutableList<FooterStatCollector> values = collectorsByTable.get(partName);
+
+      if (values == null) {
+        throw new RuntimeException("very intresting");
+      }
+
+      if (values.get(0).result instanceof Table) {
+        db.alterTable(tableFullName, (Table) values.get(0).result, environmentContext);
+        LOG.debug("Updated stats for {}.", tableFullName);
+      } else {
+        if (values.get(0).result instanceof Partition) {
+          List<Partition> results = Lists.transform(values, FooterStatCollector.EXTRACT_RESULT_FUNCTION);
+          db.alterPartitions(tableFullName, results, environmentContext);
+          LOG.debug("Bulk updated {} partitions of {}.", results.size(), tableFullName);
+        } else {
+          throw new RuntimeException("inconsistent");
+        }
+      }
+    }
+    LOG.debug("Updated stats for: {}", tableFullName);
+    return 0;
+  }
+
+  private void shutdownAndAwaitTermination(ExecutorService threadPool) {
+
+    // Disable new tasks from being submitted
+    threadPool.shutdown();
+    try {
+
+      // Wait a while for existing tasks to terminate
+      // XXX this will wait forever... :)
+      while (!threadPool.awaitTermination(10, TimeUnit.SECONDS)) {
+        LOG.debug("Waiting for all stats tasks to finish...");
+      }
+      // Cancel currently executing tasks
+      threadPool.shutdownNow();
+
+      // Wait a while for tasks to respond to being cancelled
+      if (!threadPool.awaitTermination(100, TimeUnit.SECONDS)) {
+        LOG.debug("Stats collection thread pool did not terminate");
+      }
+    } catch (InterruptedException ie) {
+
+      // Cancel again if current thread also interrupted
+      threadPool.shutdownNow();
+
+      // Preserve interrupt status
+      Thread.currentThread().interrupt();
+    }
+  }
+
+  @Override
+  public void setDpPartSpecs(Collection<Partition> dpPartSpecs) {
+  }
+}
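
The heart of FooterStatCollector.run() above is the footer-read pattern: a zero-length split is opened only so the reader parses the file footer, and formats implementing StatsProvidingRecordReader (such as ORC) hand back row counts without scanning rows. A condensed sketch of that pattern, assuming the input format, file status and JobConf are prepared as in the collector, and that getStats() returns a SerDeStats as in the current Hive codebase:

    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.hive.ql.io.StatsProvidingRecordReader;
    import org.apache.hadoop.hive.serde2.SerDeStats;
    import org.apache.hadoop.mapred.FileSplit;
    import org.apache.hadoop.mapred.InputFormat;
    import org.apache.hadoop.mapred.JobConf;
    import org.apache.hadoop.mapred.RecordReader;
    import org.apache.hadoop.mapred.Reporter;

    public class FooterReadSketch {
      // Returns {rowCount, rawDataSize} taken from the file footer.
      public static long[] readFooterStats(InputFormat<?, ?> format, FileStatus file, JobConf jc)
          throws Exception {
        // Offset 0, length 0: enough to open the file and read its footer.
        FileSplit dummySplit = new FileSplit(file.getPath(), 0, 0, new String[0]);
        RecordReader<?, ?> reader = format.getRecordReader(dummySplit, jc, Reporter.NULL);
        try {
          if (reader instanceof StatsProvidingRecordReader) {
            SerDeStats stats = ((StatsProvidingRecordReader) reader).getStats();
            return new long[] { stats.getRowCount(), stats.getRawDataSize() };
          }
          throw new IllegalStateException("No footer stats for " + file.getPath());
        } finally {
          reader.close();
        }
      }
    }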

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/java/org/apache/hadoop/hive/ql/stats/BasicStatsTask.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/stats/BasicStatsTask.java b/ql/src/java/org/apache/hadoop/hive/ql/stats/BasicStatsTask.java
new file mode 100644
index 0000000..ecf3b9d
--- /dev/null
+++ b/ql/src/java/org/apache/hadoop/hive/ql/stats/BasicStatsTask.java
@@ -0,0 +1,499 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+package org.apache.hadoop.hive.ql.stats;
+
+import java.io.Serializable;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.Callable;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.Future;
+
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hive.common.StatsSetupConst;
+import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
+import org.apache.hadoop.hive.metastore.MetaStoreUtils;
+import org.apache.hadoop.hive.metastore.Warehouse;
+import org.apache.hadoop.hive.metastore.api.EnvironmentContext;
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.ql.CompilationOpContext;
+import org.apache.hadoop.hive.ql.ErrorMsg;
+import org.apache.hadoop.hive.ql.exec.Task;
+import org.apache.hadoop.hive.ql.exec.Utilities;
+import org.apache.hadoop.hive.ql.metadata.Hive;
+import org.apache.hadoop.hive.ql.metadata.HiveException;
+import org.apache.hadoop.hive.ql.metadata.Partition;
+import org.apache.hadoop.hive.ql.metadata.Table;
+import org.apache.hadoop.hive.ql.parse.BaseSemanticAnalyzer.TableSpec;
+import org.apache.hadoop.hive.ql.plan.BasicStatsWork;
+import org.apache.hadoop.hive.ql.plan.DynamicPartitionCtx;
+import org.apache.hadoop.hive.ql.plan.LoadTableDesc;
+import org.apache.hadoop.hive.ql.plan.api.StageType;
+import org.apache.hadoop.hive.ql.session.SessionState.LogHelper;
+import org.apache.hadoop.util.StringUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.common.collect.Lists;
+import com.google.common.util.concurrent.ThreadFactoryBuilder;
+
+/**
+ * BasicStatsTask implementation. It mainly deals with "collectable" stats, i.e. stats
+ * that require data scanning and are collected during query execution (unless the user
+ * explicitly requests data scanning just for the purpose of stats computation using the "ANALYZE"
+ * command). All other stats are computed directly by the MetaStore. The rationale is that the
+ * MetaStore layer covers all Thrift calls and provides better guarantees about the accuracy of
+ * those stats.
+ **/
+public class BasicStatsTask implements Serializable, IStatsProcessor {
+
+  private static final long serialVersionUID = 1L;
+  private static transient final Logger LOG = LoggerFactory.getLogger(BasicStatsTask.class);
+
+  private Table table;
+  private Collection<Partition> dpPartSpecs;
+  public boolean followedColStats;
+  private BasicStatsWork work;
+  private HiveConf conf;
+
+  protected transient LogHelper console;
+
+  public BasicStatsTask(HiveConf conf, BasicStatsWork work) {
+    super();
+    dpPartSpecs = null;
+    this.conf = conf;
+    console = new LogHelper(LOG);
+    this.work = work;
+  }
+
+  @Override
+  public int process(Hive db, Table tbl) throws Exception {
+
+    LOG.info("Executing stats task");
+    table = tbl;
+    return aggregateStats(db);
+  }
+
+  @Override
+  public void initialize(CompilationOpContext opContext) {
+  }
+
+  public StageType getType() {
+    return StageType.STATS;
+  }
+
+  public String getName() {
+    return "STATS";
+  }
+
+  private static class BasicStatsProcessor {
+
+    private Partish partish;
+    private FileStatus[] partfileStatus;
+    private BasicStatsWork work;
+    private boolean atomic;
+    private boolean followedColStats1;
+
+    public BasicStatsProcessor(Partish partish, BasicStatsWork work, HiveConf conf, boolean followedColStats2) {
+      this.partish = partish;
+      this.work = work;
+      atomic = HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_STATS_ATOMIC);
+      followedColStats1 = followedColStats2;
+    }
+
+    public Object process(StatsAggregator statsAggregator) throws HiveException, MetaException {
+      Partish p = partish;
+      Map<String, String> parameters = p.getPartParameters();
+      if (p.isAcid()) {
+        StatsSetupConst.setBasicStatsState(parameters, StatsSetupConst.FALSE);
+      }
+
+      if (work.isTargetRewritten()) {
+        StatsSetupConst.setBasicStatsState(parameters, StatsSetupConst.TRUE);
+      }
+
+      // work.getTableSpecs() == null means it is not analyze command
+      // and then if it is not followed by column stats, we should clean
+      // column stats
+      // FIXME: move this to ColStat related part
+      if (!work.isExplicitAnalyze() && !followedColStats1) {
+        StatsSetupConst.clearColumnStatsState(parameters);
+      }
+      // non-partitioned tables:
+      // XXX: I don't agree with this logic
+      // FIXME: deprecate atomic? what's its purpose?
+      if (!existStats(parameters) && atomic) {
+        return null;
+      }
+      if(partfileStatus == null){
+        LOG.warn("Partition/partfiles is null for: " + partish.getPartition().getSpec());
+        return null;
+      }
+
+      // The collectable stats for the aggregator needs to be cleared.
+      // For eg. if a file is being loaded, the old number of rows are not valid
+      // XXX: makes no sense for me... possibly not needed anymore
+      if (work.isClearAggregatorStats()) {
+        // we choose to keep the invalid stats and only change the setting.
+        StatsSetupConst.setBasicStatsState(parameters, StatsSetupConst.FALSE);
+      }
+
+      updateQuickStats(parameters, partfileStatus);
+      if (StatsSetupConst.areBasicStatsUptoDate(parameters)) {
+        if (statsAggregator != null) {
+          String prefix = getAggregationPrefix(p.getTable(), p.getPartition());
+          updateStats(statsAggregator, parameters, prefix, atomic);
+        }
+      }
+
+      return p.getOutput();
+    }
+
+    public void collectFileStatus(Warehouse wh) throws MetaException {
+      Map<String, String> parameters = partish.getPartParameters();
+      if (!existStats(parameters) && atomic) {
+        return;
+      }
+      partfileStatus = wh.getFileStatusesForSD(partish.getPartSd());
+    }
+
+    @Deprecated
+    private boolean existStats(Map<String, String> parameters) {
+      return parameters.containsKey(StatsSetupConst.ROW_COUNT)
+          || parameters.containsKey(StatsSetupConst.NUM_FILES)
+          || parameters.containsKey(StatsSetupConst.TOTAL_SIZE)
+          || parameters.containsKey(StatsSetupConst.RAW_DATA_SIZE)
+          || parameters.containsKey(StatsSetupConst.NUM_PARTITIONS);
+    }
+
+    private void updateQuickStats(Map<String, String> parameters, FileStatus[] partfileStatus) throws MetaException {
+      MetaStoreUtils.populateQuickStats(partfileStatus, parameters);
+    }
+
+    private String getAggregationPrefix(Table table, Partition partition) throws MetaException {
+      String prefix = getAggregationPrefix0(table, partition);
+      String aggKey = prefix.endsWith(Path.SEPARATOR) ? prefix : prefix + Path.SEPARATOR;
+      return aggKey;
+    }
+
+    private String getAggregationPrefix0(Table table, Partition partition) throws MetaException {
+
+      // prefix is of the form dbName.tblName
+      String prefix = table.getDbName() + "." + org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.encodeTableName(table.getTableName());
+      // FIXME: this is a secret contract; reusing getAggKey() would create a closer relation to the StatsGatherer
+      // prefix = work.getAggKey();
+      prefix = prefix.toLowerCase();
+      if (partition != null) {
+        return Utilities.join(prefix, Warehouse.makePartPath(partition.getSpec()));
+      }
+      return prefix;
+    }
+
+    private void updateStats(StatsAggregator statsAggregator, Map<String, String> parameters, String aggKey, boolean atomic) throws HiveException {
+
+      for (String statType : StatsSetupConst.statsRequireCompute) {
+        String value = statsAggregator.aggregateStats(aggKey, statType);
+        if (value != null && !value.isEmpty()) {
+          long longValue = Long.parseLong(value);
+
+          if (!work.isTargetRewritten()) {
+            String originalValue = parameters.get(statType);
+            if (originalValue != null) {
+              longValue += Long.parseLong(originalValue); // todo: invalid + valid = invalid
+            }
+          }
+          parameters.put(statType, String.valueOf(longValue));
+        } else {
+          if (atomic) {
+            throw new HiveException(ErrorMsg.STATSAGGREGATOR_MISSED_SOMESTATS, statType);
+          }
+        }
+      }
+    }
+
+  }
+
+
+  private int aggregateStats(Hive db) {
+
+    StatsAggregator statsAggregator = null;
+    int ret = 0;
+    StatsCollectionContext scc = null;
+    EnvironmentContext environmentContext = new EnvironmentContext();
+    environmentContext.putToProperties(StatsSetupConst.DO_NOT_UPDATE_STATS, StatsSetupConst.TRUE);
+
+    try {
+      // Stats setup:
+      final Warehouse wh = new Warehouse(conf);
+      if (!getWork().getNoStatsAggregator() && !getWork().isNoScanAnalyzeCommand()) {
+        try {
+          scc = getContext();
+          statsAggregator = createStatsAggregator(scc, conf);
+        } catch (HiveException e) {
+          if (HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_STATS_RELIABLE)) {
+            throw e;
+          }
+          console.printError(ErrorMsg.STATS_SKIPPING_BY_ERROR.getErrorCodedMsg(e.toString()));
+        }
+      }
+
+      List<Partition> partitions = getPartitionsList(db);
+
+      String tableFullName = table.getDbName() + "." + table.getTableName();
+
+      List<Partish> partishes = new ArrayList<>();
+
+      if (partitions == null) {
+        Partish p = new Partish.PTable(table);
+        partishes.add(p);
+
+        BasicStatsProcessor basicStatsProcessor = new BasicStatsProcessor(p, work, conf, followedColStats);
+        basicStatsProcessor.collectFileStatus(wh);
+        Object res = basicStatsProcessor.process(statsAggregator);
+
+        if (res == null) {
+          return 0;
+        }
+        db.alterTable(tableFullName, (Table) res, environmentContext);
+
+        if (conf.getBoolVar(ConfVars.TEZ_EXEC_SUMMARY)) {
+          console.printInfo("Table " + tableFullName + " stats: [" + toString(p.getPartParameters()) + ']');
+        }
+        LOG.info("Table " + tableFullName + " stats: [" + toString(p.getPartParameters()) + ']');
+
+      } else {
+        // Partitioned table:
+        // Need to get the old stats of the partition
+        // and update the table stats based on the old and new stats.
+
+        List<Partition> updates = new ArrayList<Partition>();
+
+        final ExecutorService pool = buildBasicStatsExecutor();
+
+        final List<Future<Void>> futures = Lists.newLinkedList();
+        List<BasicStatsProcessor> processors = Lists.newLinkedList();
+
+        try {
+          for (final Partition partn : partitions) {
+            Partish p = new Partish.PPart(table, partn);
+            BasicStatsProcessor bsp = new BasicStatsProcessor(p, work, conf, followedColStats);
+            processors.add(bsp);
+
+            futures.add(pool.submit(new Callable<Void>() {
+              @Override
+              public Void call() throws Exception {
+                bsp.collectFileStatus(wh);
+                return null;
+              }
+            }));
+          }
+          pool.shutdown();
+          for (Future<Void> future : futures) {
+            future.get();
+          }
+        } catch (InterruptedException e) {
+          LOG.debug("Cancelling " + futures.size() + " file stats lookup tasks");
+          //cancel other futures
+          for (Future future : futures) {
+            future.cancel(true);
+          }
+          // Fail the query if the stats are supposed to be reliable
+          if (work.isStatsReliable()) {
+            ret = 1;
+          }
+        } finally {
+          if (pool != null) {
+            pool.shutdownNow();
+          }
+          LOG.debug("Finished getting file stats of all partitions!");
+        }
+
+        for (BasicStatsProcessor basicStatsProcessor : processors) {
+          Object res = basicStatsProcessor.process(statsAggregator);
+          if (res == null) {
+            LOG.info("Partition " + basicStatsProcessor.partish.getPartition().getSpec() + " stats: [0]");
+            continue;
+          }
+          updates.add((Partition) res);
+          if (conf.getBoolVar(ConfVars.TEZ_EXEC_SUMMARY)) {
+            console.printInfo("Partition " + basicStatsProcessor.partish.getPartition().getSpec() + " stats: [" + toString(basicStatsProcessor.partish.getPartParameters()) + ']');
+          }
+          LOG.info("Partition " + basicStatsProcessor.partish.getPartition().getSpec() + " stats: [" + toString(basicStatsProcessor.partish.getPartParameters()) + ']');
+        }
+
+        if (!updates.isEmpty()) {
+          db.alterPartitions(tableFullName, updates, environmentContext);
+        }
+        if (work.isStatsReliable() && updates.size() != processors.size()) {
+          LOG.info("Stats should be reliadble...however seems like there were some issue.. => ret 1");
+          ret = 1;
+        }
+      }
+
+    } catch (Exception e) {
+      console.printInfo("[Warning] could not update stats.",
+          "Failed with exception " + e.getMessage() + "\n"
+              + StringUtils.stringifyException(e));
+
+      // Fail the query if the stats are supposed to be reliable
+      if (work.isStatsReliable()) {
+        ret = 1;
+      }
+    } finally {
+      if (statsAggregator != null) {
+        statsAggregator.closeConnection(scc);
+      }
+    }
+    // The return value of 0 indicates success,
+    // anything else indicates failure
+    return ret;
+  }
+
+  private BasicStatsWork getWork() {
+    return work;
+  }
+
+  private ExecutorService buildBasicStatsExecutor() {
+    // Get the file status up-front for all partitions. Beneficial on blob storage systems.
+    int poolSize = conf.getInt(ConfVars.HIVE_MOVE_FILES_THREAD_COUNT.varname, 1);
+    // In case thread count is set to 0, use single thread.
+    poolSize = Math.max(poolSize, 1);
+    final ExecutorService pool = Executors.newFixedThreadPool(poolSize, new ThreadFactoryBuilder().setDaemon(true).setNameFormat("stats-updater-thread-%d").build());
+    LOG.debug("Getting file stats of all partitions. threadpool size:" + poolSize);
+    return pool;
+  }
+
+  private StatsAggregator createStatsAggregator(StatsCollectionContext scc, HiveConf conf) throws HiveException {
+    String statsImpl = HiveConf.getVar(conf, HiveConf.ConfVars.HIVESTATSDBCLASS);
+    StatsFactory factory = StatsFactory.newFactory(statsImpl, conf);
+    if (factory == null) {
+      throw new HiveException(ErrorMsg.STATSPUBLISHER_NOT_OBTAINED.getErrorCodedMsg());
+    }
+    // Initialize the stats publishing table for a noscan analyze, which has only the stats task;
+    // MR tasks followed by a stats task initialize it in ExecDriver.java.
+    StatsPublisher statsPublisher = factory.getStatsPublisher();
+    if (!statsPublisher.init(scc)) { // creating stats table if not exists
+      throw new HiveException(ErrorMsg.STATSPUBLISHER_INITIALIZATION_ERROR.getErrorCodedMsg());
+    }
+
+    // manufacture a StatsAggregator
+    StatsAggregator statsAggregator = factory.getStatsAggregator();
+    if (!statsAggregator.connect(scc)) {
+      throw new HiveException(ErrorMsg.STATSAGGREGATOR_CONNECTION_ERROR.getErrorCodedMsg(statsImpl));
+    }
+    return statsAggregator;
+  }
+
+  private StatsCollectionContext getContext() throws HiveException {
+
+    StatsCollectionContext scc = new StatsCollectionContext(conf);
+    Task sourceTask = getWork().getSourceTask();
+    if (sourceTask == null) {
+      throw new HiveException(ErrorMsg.STATSAGGREGATOR_SOURCETASK_NULL.getErrorCodedMsg());
+    }
+    scc.setTask(sourceTask);
+    scc.setStatsTmpDir(this.getWork().getStatsTmpDir());
+    return scc;
+  }
+
+
+  private String toString(Map<String, String> parameters) {
+    StringBuilder builder = new StringBuilder();
+    for (String statType : StatsSetupConst.supportedStats) {
+      String value = parameters.get(statType);
+      if (value != null) {
+        if (builder.length() > 0) {
+          builder.append(", ");
+        }
+        builder.append(statType).append('=').append(value);
+      }
+    }
+    return builder.toString();
+  }
+
+  /**
+   * Get the list of partitions whose statistics need to be updated.
+   * TODO: we should reuse the Partitions generated at compile time
+   * since getting the list of partitions is quite expensive.
+   *
+   * @return a list of partitions whose statistics need to be updated.
+   * @throws HiveException
+   */
+  private List<Partition> getPartitionsList(Hive db) throws HiveException {
+    if (work.getLoadFileDesc() != null) {
+      return null; //we are in CTAS, so we know there are no partitions
+    }
+
+    List<Partition> list = new ArrayList<Partition>();
+
+    if (work.getTableSpecs() != null) {
+
+      // ANALYZE command
+      TableSpec tblSpec = work.getTableSpecs();
+      table = tblSpec.tableHandle;
+      if (!table.isPartitioned()) {
+        return null;
+      }
+      // get all partitions that matches with the partition spec
+      List<Partition> partitions = tblSpec.partitions;
+      if (partitions != null) {
+        for (Partition partn : partitions) {
+          list.add(partn);
+        }
+      }
+    } else if (work.getLoadTableDesc() != null) {
+
+      // INSERT OVERWRITE command
+      LoadTableDesc tbd = work.getLoadTableDesc();
+      table = db.getTable(tbd.getTable().getTableName());
+      if (!table.isPartitioned()) {
+        return null;
+      }
+      DynamicPartitionCtx dpCtx = tbd.getDPCtx();
+      if (dpCtx != null && dpCtx.getNumDPCols() > 0) { // dynamic partitions
+        // If no dynamic partitions are generated, dpPartSpecs may not be initialized
+        if (dpPartSpecs != null) {
+          // load the list of DP partitions and return the list of partition specs
+          list.addAll(dpPartSpecs);
+        }
+      } else { // static partition
+        Partition partn = db.getPartition(table, tbd.getPartitionSpec(), false);
+        list.add(partn);
+      }
+    }
+    return list;
+  }
+
+  public Collection<Partition> getDpPartSpecs() {
+    return dpPartSpecs;
+  }
+
+  @Override
+  public void setDpPartSpecs(Collection<Partition> dpPartSpecs) {
+    this.dpPartSpecs = dpPartSpecs;
+  }
+
+}
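
The refactored task splits per-partition work into two phases: file-status collection (I/O bound, fanned out on the executor built by buildBasicStatsExecutor) and the metadata update (kept sequential so that alterPartitions receives one consistent batch). A minimal, self-contained sketch of that pattern follows; the Item type is a hypothetical stand-in for BasicStatsProcessor and is not part of the patch:

    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.List;
    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;
    import java.util.concurrent.Future;

    public class TwoPhaseSketch {
      // Hypothetical stand-in for BasicStatsProcessor:
      // phase 1 gathers file status (parallel), phase 2 mutates metadata (sequential).
      static class Item {
        volatile long size = -1;
        void collect() { size = 42; }                     // e.g. list files on blob storage
        void process() { System.out.println("size=" + size); }
      }

      public static void main(String[] args) throws Exception {
        List<Item> items = Arrays.asList(new Item(), new Item(), new Item());
        ExecutorService pool = Executors.newFixedThreadPool(2, r -> {
          Thread t = new Thread(r, "stats-updater");
          t.setDaemon(true);
          return t;
        });
        List<Future<?>> futures = new ArrayList<>();
        for (Item item : items) {
          futures.add(pool.submit(item::collect));        // phase 1 fans out
        }
        for (Future<?> f : futures) {
          f.get();                                        // barrier: wait for every lookup
        }
        pool.shutdownNow();
        for (Item item : items) {
          item.process();                                 // phase 2 stays on the caller thread
        }
      }
    }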

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/java/org/apache/hadoop/hive/ql/stats/ColStatsProcessor.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/stats/ColStatsProcessor.java b/ql/src/java/org/apache/hadoop/hive/ql/stats/ColStatsProcessor.java
new file mode 100644
index 0000000..7ce7a74
--- /dev/null
+++ b/ql/src/java/org/apache/hadoop/hive/ql/stats/ColStatsProcessor.java
@@ -0,0 +1,188 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.ql.stats;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.List;
+
+import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
+import org.apache.hadoop.hive.metastore.Warehouse;
+import org.apache.hadoop.hive.metastore.api.ColumnStatistics;
+import org.apache.hadoop.hive.metastore.api.ColumnStatisticsDesc;
+import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;
+import org.apache.hadoop.hive.metastore.api.FieldSchema;
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.api.SetPartitionsStatsRequest;
+import org.apache.hadoop.hive.ql.CompilationOpContext;
+import org.apache.hadoop.hive.ql.exec.FetchOperator;
+import org.apache.hadoop.hive.ql.metadata.Hive;
+import org.apache.hadoop.hive.ql.metadata.HiveException;
+import org.apache.hadoop.hive.ql.metadata.Partition;
+import org.apache.hadoop.hive.ql.metadata.Table;
+import org.apache.hadoop.hive.ql.plan.ColumnStatsDesc;
+import org.apache.hadoop.hive.ql.plan.FetchWork;
+import org.apache.hadoop.hive.serde2.objectinspector.InspectableObject;
+import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
+import org.apache.hadoop.hive.serde2.objectinspector.PrimitiveObjectInspector;
+import org.apache.hadoop.hive.serde2.objectinspector.StructField;
+import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector;
+import org.apache.hadoop.mapred.JobConf;
+import org.apache.hadoop.util.StringUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public class ColStatsProcessor implements IStatsProcessor {
+  private static final Logger LOG = LoggerFactory.getLogger(ColStatsProcessor.class);
+
+  private FetchOperator ftOp;
+  private FetchWork fWork;
+  private ColumnStatsDesc colStatDesc;
+  private HiveConf conf;
+  private boolean isStatsReliable;
+
+  public ColStatsProcessor(ColumnStatsDesc colStats, HiveConf conf) {
+    this.conf = conf;
+    fWork = colStats.getFWork();
+    colStatDesc = colStats;
+    isStatsReliable = conf.getBoolVar(ConfVars.HIVE_STATS_RELIABLE);
+  }
+
+  @Override
+  public void initialize(CompilationOpContext opContext) {
+    try {
+      fWork.initializeForFetch(opContext);
+      JobConf job = new JobConf(conf);
+      ftOp = new FetchOperator(fWork, job);
+    } catch (Exception e) {
+      LOG.error(StringUtils.stringifyException(e));
+      throw new RuntimeException(e);
+    }
+  }
+
+  @Override
+  public int process(Hive db, Table tbl) throws Exception {
+    return persistColumnStats(db, tbl);
+  }
+
+  private List<ColumnStatistics> constructColumnStatsFromPackedRows(Table tbl) throws HiveException, MetaException, IOException {
+
+    String partName = null;
+    List<String> colName = colStatDesc.getColName();
+    List<String> colType = colStatDesc.getColType();
+    boolean isTblLevel = colStatDesc.isTblLevel();
+
+    List<ColumnStatistics> stats = new ArrayList<ColumnStatistics>();
+    InspectableObject packedRow;
+    while ((packedRow = ftOp.getNextRow()) != null) {
+      if (packedRow.oi.getCategory() != ObjectInspector.Category.STRUCT) {
+        throw new HiveException("Unexpected object type encountered while unpacking row");
+      }
+
+      List<ColumnStatisticsObj> statsObjs = new ArrayList<ColumnStatisticsObj>();
+      StructObjectInspector soi = (StructObjectInspector) packedRow.oi;
+      List<? extends StructField> fields = soi.getAllStructFieldRefs();
+      List<Object> list = soi.getStructFieldsDataAsList(packedRow.o);
+
+      List<FieldSchema> partColSchema = tbl.getPartCols();
+      // Partition columns are appended at the end; we only care about the stats columns
+      int numOfStatCols = isTblLevel ? fields.size() : fields.size() - partColSchema.size();
+      assert list != null;
+      for (int i = 0; i < numOfStatCols; i++) {
+        StructField structField = fields.get(i);
+        String columnName = colName.get(i);
+        String columnType = colType.get(i);
+        Object values = list.get(i);
+        try {
+          ColumnStatisticsObj statObj = ColumnStatisticsObjTranslator.readHiveStruct(columnName, columnType, structField, values);
+          statsObjs.add(statObj);
+        } catch (Exception e) {
+          if (isStatsReliable) {
+            throw new HiveException("Statistics collection failed while (hive.stats.reliable)", e);
+          } else {
+            LOG.debug("Because {} is infinite or NaN, we skip stats.", columnName, e);
+          }
+        }
+      }
+
+      if (!statsObjs.isEmpty()) {
+
+        if (!isTblLevel) {
+          List<String> partVals = new ArrayList<String>();
+          // Iterate over partition columns to figure out partition name
+          for (int i = fields.size() - partColSchema.size(); i < fields.size(); i++) {
+            Object partVal = ((PrimitiveObjectInspector) fields.get(i).getFieldObjectInspector()).getPrimitiveJavaObject(list.get(i));
+            partVals.add(partVal == null ? // could be null for default partition
+              this.conf.getVar(ConfVars.DEFAULTPARTITIONNAME) : partVal.toString());
+          }
+          partName = Warehouse.makePartName(partColSchema, partVals);
+        }
+
+        ColumnStatisticsDesc statsDesc = buildColumnStatsDesc(tbl, partName, isTblLevel);
+        ColumnStatistics colStats = new ColumnStatistics();
+        colStats.setStatsDesc(statsDesc);
+        colStats.setStatsObj(statsObjs);
+        stats.add(colStats);
+      }
+    }
+    ftOp.clearFetchContext();
+    return stats;
+  }
+
+  private ColumnStatisticsDesc buildColumnStatsDesc(Table table, String partName, boolean isTblLevel) {
+    String dbName = table.getDbName();
+    assert dbName != null;
+    ColumnStatisticsDesc statsDesc = new ColumnStatisticsDesc();
+    statsDesc.setDbName(dbName);
+    statsDesc.setTableName(table.getTableName());
+    statsDesc.setIsTblLevel(isTblLevel);
+
+    if (!isTblLevel) {
+      statsDesc.setPartName(partName);
+    } else {
+      statsDesc.setPartName(null);
+    }
+    return statsDesc;
+  }
+
+  public int persistColumnStats(Hive db, Table tbl) throws HiveException, MetaException, IOException {
+    // Construct a column statistics object from the result
+
+    List<ColumnStatistics> colStats = constructColumnStatsFromPackedRows(tbl);
+    // Persist the column statistics object to the metastore
+    // Note, this function is shared for both table and partition column stats.
+    if (colStats.isEmpty()) {
+      return 0;
+    }
+    SetPartitionsStatsRequest request = new SetPartitionsStatsRequest(colStats);
+    request.setNeedMerge(colStatDesc.isNeedMerge());
+    db.setPartitionColumnStatistics(request);
+    return 0;
+  }
+
+  @Override
+  public void setDpPartSpecs(Collection<Partition> dpPartSpecs) {
+  }
+
+}
\ No newline at end of file
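
For context, constructColumnStatsFromPackedRows assumes each fetched row packs one stats struct per requested column, with the partition key values appended at the end; table-level stats simply have no trailing partition values. A toy sketch of that index arithmetic (the placeholder strings are hypothetical; real rows carry structs produced upstream):

    import java.util.Arrays;
    import java.util.List;

    public class PackedRowLayout {
      public static void main(String[] args) {
        // Hypothetical row for a table partitioned by (ds):
        // two stats structs, then the ds value.
        List<Object> fields = Arrays.asList("<stats(c1)>", "<stats(c2)>", "2017-11-07");
        int partCols = 1;
        boolean isTblLevel = false;
        int numOfStatCols = isTblLevel ? fields.size() : fields.size() - partCols;
        for (int i = 0; i < numOfStatCols; i++) {
          System.out.println("stats column " + i + " -> " + fields.get(i));
        }
        for (int i = fields.size() - partCols; i < fields.size(); i++) {
          // a null here maps to the configured default partition name
          System.out.println("partition value -> " + fields.get(i));
        }
      }
    }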

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/java/org/apache/hadoop/hive/ql/stats/ColumnStatisticsObjTranslator.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/stats/ColumnStatisticsObjTranslator.java b/ql/src/java/org/apache/hadoop/hive/ql/stats/ColumnStatisticsObjTranslator.java
new file mode 100644
index 0000000..6485526
--- /dev/null
+++ b/ql/src/java/org/apache/hadoop/hive/ql/stats/ColumnStatisticsObjTranslator.java
@@ -0,0 +1,293 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.ql.stats;
+
+import java.nio.ByteBuffer;
+import java.util.List;
+
+import org.apache.hadoop.hive.common.type.HiveDecimal;
+import org.apache.hadoop.hive.metastore.api.BinaryColumnStatsData;
+import org.apache.hadoop.hive.metastore.api.BooleanColumnStatsData;
+import org.apache.hadoop.hive.metastore.api.ColumnStatisticsData;
+import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;
+import org.apache.hadoop.hive.metastore.api.Date;
+import org.apache.hadoop.hive.metastore.api.Decimal;
+import org.apache.hadoop.hive.metastore.columnstats.cache.DateColumnStatsDataInspector;
+import org.apache.hadoop.hive.metastore.columnstats.cache.DecimalColumnStatsDataInspector;
+import org.apache.hadoop.hive.metastore.columnstats.cache.DoubleColumnStatsDataInspector;
+import org.apache.hadoop.hive.metastore.columnstats.cache.LongColumnStatsDataInspector;
+import org.apache.hadoop.hive.metastore.columnstats.cache.StringColumnStatsDataInspector;
+import org.apache.hadoop.hive.ql.metadata.HiveException;
+import org.apache.hadoop.hive.serde2.io.DateWritable;
+import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
+import org.apache.hadoop.hive.serde2.objectinspector.PrimitiveObjectInspector;
+import org.apache.hadoop.hive.serde2.objectinspector.StructField;
+import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector;
+import org.apache.hadoop.hive.serde2.objectinspector.primitive.BinaryObjectInspector;
+import org.apache.hadoop.hive.serde2.objectinspector.primitive.DateObjectInspector;
+import org.apache.hadoop.hive.serde2.objectinspector.primitive.DoubleObjectInspector;
+import org.apache.hadoop.hive.serde2.objectinspector.primitive.HiveDecimalObjectInspector;
+import org.apache.hadoop.hive.serde2.objectinspector.primitive.LongObjectInspector;
+import org.apache.hadoop.hive.serde2.objectinspector.primitive.StringObjectInspector;
+
+public class ColumnStatisticsObjTranslator {
+
+  public static ColumnStatisticsObj readHiveStruct(String columnName, String columnType, StructField structField, Object values)
+      throws HiveException {
+    // Get the field objectInspector, fieldName and the field object.
+    ObjectInspector foi = structField.getFieldObjectInspector();
+    Object f = values;
+    String fieldName = structField.getFieldName();
+    ColumnStatisticsObj statsObj = new ColumnStatisticsObj();
+    statsObj.setColName(columnName);
+    statsObj.setColType(columnType);
+    try {
+      unpackStructObject(foi, f, fieldName, statsObj);
+      return statsObj;
+    } catch (Exception e) {
+      throw new HiveException("error calculating stats for column:" + structField.getFieldName(), e);
+    }
+  }
+
+  private static void unpackBooleanStats(ObjectInspector oi, Object o, String fName, ColumnStatisticsObj statsObj) {
+    long v = ((LongObjectInspector) oi).get(o);
+    if (fName.equals("counttrues")) {
+      statsObj.getStatsData().getBooleanStats().setNumTrues(v);
+    } else if (fName.equals("countfalses")) {
+      statsObj.getStatsData().getBooleanStats().setNumFalses(v);
+    } else if (fName.equals("countnulls")) {
+      statsObj.getStatsData().getBooleanStats().setNumNulls(v);
+    }
+  }
+
+  @SuppressWarnings("serial")
+  static class UnsupportedDoubleException extends Exception {
+  }
+
+  private static void unpackDoubleStats(ObjectInspector oi, Object o, String fName, ColumnStatisticsObj statsObj) throws UnsupportedDoubleException {
+    if (fName.equals("countnulls")) {
+      long v = ((LongObjectInspector) oi).get(o);
+      statsObj.getStatsData().getDoubleStats().setNumNulls(v);
+    } else if (fName.equals("numdistinctvalues")) {
+      long v = ((LongObjectInspector) oi).get(o);
+      statsObj.getStatsData().getDoubleStats().setNumDVs(v);
+    } else if (fName.equals("max")) {
+      double d = ((DoubleObjectInspector) oi).get(o);
+      if (Double.isInfinite(d) || Double.isNaN(d)) {
+        throw new UnsupportedDoubleException();
+      }
+      statsObj.getStatsData().getDoubleStats().setHighValue(d);
+    } else if (fName.equals("min")) {
+      double d = ((DoubleObjectInspector) oi).get(o);
+      if (Double.isInfinite(d) || Double.isNaN(d)) {
+        throw new UnsupportedDoubleException();
+      }
+      statsObj.getStatsData().getDoubleStats().setLowValue(d);
+    } else if (fName.equals("ndvbitvector")) {
+      PrimitiveObjectInspector poi = (PrimitiveObjectInspector) oi;
+      byte[] buf = ((BinaryObjectInspector) poi).getPrimitiveJavaObject(o);
+      statsObj.getStatsData().getDoubleStats().setBitVectors(buf);
+    }
+  }
+
+  private static void unpackDecimalStats(ObjectInspector oi, Object o, String fName, ColumnStatisticsObj statsObj) {
+    if (fName.equals("countnulls")) {
+      long v = ((LongObjectInspector) oi).get(o);
+      statsObj.getStatsData().getDecimalStats().setNumNulls(v);
+    } else if (fName.equals("numdistinctvalues")) {
+      long v = ((LongObjectInspector) oi).get(o);
+      statsObj.getStatsData().getDecimalStats().setNumDVs(v);
+    } else if (fName.equals("max")) {
+      HiveDecimal d = ((HiveDecimalObjectInspector) oi).getPrimitiveJavaObject(o);
+      statsObj.getStatsData().getDecimalStats().setHighValue(convertToThriftDecimal(d));
+    } else if (fName.equals("min")) {
+      HiveDecimal d = ((HiveDecimalObjectInspector) oi).getPrimitiveJavaObject(o);
+      statsObj.getStatsData().getDecimalStats().setLowValue(convertToThriftDecimal(d));
+    } else if (fName.equals("ndvbitvector")) {
+      PrimitiveObjectInspector poi = (PrimitiveObjectInspector) oi;
+      byte[] buf = ((BinaryObjectInspector) poi).getPrimitiveJavaObject(o);
+      statsObj.getStatsData().getDecimalStats().setBitVectors(buf);
+    }
+  }
+
+  private static Decimal convertToThriftDecimal(HiveDecimal d) {
+    return new Decimal(ByteBuffer.wrap(d.unscaledValue().toByteArray()), (short) d.scale());
+  }
+
+  private static void unpackLongStats(ObjectInspector oi, Object o, String fName, ColumnStatisticsObj statsObj) {
+    if (fName.equals("countnulls")) {
+      long v = ((LongObjectInspector) oi).get(o);
+      statsObj.getStatsData().getLongStats().setNumNulls(v);
+    } else if (fName.equals("numdistinctvalues")) {
+      long v = ((LongObjectInspector) oi).get(o);
+      statsObj.getStatsData().getLongStats().setNumDVs(v);
+    } else if (fName.equals("max")) {
+      long v = ((LongObjectInspector) oi).get(o);
+      statsObj.getStatsData().getLongStats().setHighValue(v);
+    } else if (fName.equals("min")) {
+      long v = ((LongObjectInspector) oi).get(o);
+      statsObj.getStatsData().getLongStats().setLowValue(v);
+    } else if (fName.equals("ndvbitvector")) {
+      PrimitiveObjectInspector poi = (PrimitiveObjectInspector) oi;
+      byte[] buf = ((BinaryObjectInspector) poi).getPrimitiveJavaObject(o);
+      statsObj.getStatsData().getLongStats().setBitVectors(buf);
+      ;
+    }
+  }
+
+  private static void unpackStringStats(ObjectInspector oi, Object o, String fName, ColumnStatisticsObj statsObj) {
+    if (fName.equals("countnulls")) {
+      long v = ((LongObjectInspector) oi).get(o);
+      statsObj.getStatsData().getStringStats().setNumNulls(v);
+    } else if (fName.equals("numdistinctvalues")) {
+      long v = ((LongObjectInspector) oi).get(o);
+      statsObj.getStatsData().getStringStats().setNumDVs(v);
+    } else if (fName.equals("avglength")) {
+      double d = ((DoubleObjectInspector) oi).get(o);
+      statsObj.getStatsData().getStringStats().setAvgColLen(d);
+    } else if (fName.equals("maxlength")) {
+      long v = ((LongObjectInspector) oi).get(o);
+      statsObj.getStatsData().getStringStats().setMaxColLen(v);
+    } else if (fName.equals("ndvbitvector")) {
+      PrimitiveObjectInspector poi = (PrimitiveObjectInspector) oi;
+      byte[] buf = ((BinaryObjectInspector) poi).getPrimitiveJavaObject(o);
+      statsObj.getStatsData().getStringStats().setBitVectors(buf);
+    }
+  }
+
+  private static void unpackBinaryStats(ObjectInspector oi, Object o, String fName, ColumnStatisticsObj statsObj) {
+    if (fName.equals("countnulls")) {
+      long v = ((LongObjectInspector) oi).get(o);
+      statsObj.getStatsData().getBinaryStats().setNumNulls(v);
+    } else if (fName.equals("avglength")) {
+      double d = ((DoubleObjectInspector) oi).get(o);
+      statsObj.getStatsData().getBinaryStats().setAvgColLen(d);
+    } else if (fName.equals("maxlength")) {
+      long v = ((LongObjectInspector) oi).get(o);
+      statsObj.getStatsData().getBinaryStats().setMaxColLen(v);
+    }
+  }
+
+  private static void unpackDateStats(ObjectInspector oi, Object o, String fName, ColumnStatisticsObj statsObj) {
+    if (fName.equals("countnulls")) {
+      long v = ((LongObjectInspector) oi).get(o);
+      statsObj.getStatsData().getDateStats().setNumNulls(v);
+    } else if (fName.equals("numdistinctvalues")) {
+      long v = ((LongObjectInspector) oi).get(o);
+      statsObj.getStatsData().getDateStats().setNumDVs(v);
+    } else if (fName.equals("max")) {
+      DateWritable v = ((DateObjectInspector) oi).getPrimitiveWritableObject(o);
+      statsObj.getStatsData().getDateStats().setHighValue(new Date(v.getDays()));
+    } else if (fName.equals("min")) {
+      DateWritable v = ((DateObjectInspector) oi).getPrimitiveWritableObject(o);
+      statsObj.getStatsData().getDateStats().setLowValue(new Date(v.getDays()));
+    } else if (fName.equals("ndvbitvector")) {
+      PrimitiveObjectInspector poi = (PrimitiveObjectInspector) oi;
+      byte[] buf = ((BinaryObjectInspector) poi).getPrimitiveJavaObject(o);
+      statsObj.getStatsData().getDateStats().setBitVectors(buf);
+    }
+  }
+
+  private static void unpackPrimitiveObject(ObjectInspector oi, Object o, String fieldName, ColumnStatisticsObj statsObj) throws UnsupportedDoubleException {
+    if (o == null) {
+      return;
+    }
+    // First infer the type of object
+    if (fieldName.equals("columntype")) {
+      PrimitiveObjectInspector poi = (PrimitiveObjectInspector) oi;
+      String s = ((StringObjectInspector) poi).getPrimitiveJavaObject(o);
+      ColumnStatisticsData statsData = new ColumnStatisticsData();
+
+      if (s.equalsIgnoreCase("long")) {
+        LongColumnStatsDataInspector longStats = new LongColumnStatsDataInspector();
+        statsData.setLongStats(longStats);
+        statsObj.setStatsData(statsData);
+      } else if (s.equalsIgnoreCase("double")) {
+        DoubleColumnStatsDataInspector doubleStats = new DoubleColumnStatsDataInspector();
+        statsData.setDoubleStats(doubleStats);
+        statsObj.setStatsData(statsData);
+      } else if (s.equalsIgnoreCase("string")) {
+        StringColumnStatsDataInspector stringStats = new StringColumnStatsDataInspector();
+        statsData.setStringStats(stringStats);
+        statsObj.setStatsData(statsData);
+      } else if (s.equalsIgnoreCase("boolean")) {
+        BooleanColumnStatsData booleanStats = new BooleanColumnStatsData();
+        statsData.setBooleanStats(booleanStats);
+        statsObj.setStatsData(statsData);
+      } else if (s.equalsIgnoreCase("binary")) {
+        BinaryColumnStatsData binaryStats = new BinaryColumnStatsData();
+        statsData.setBinaryStats(binaryStats);
+        statsObj.setStatsData(statsData);
+      } else if (s.equalsIgnoreCase("decimal")) {
+        DecimalColumnStatsDataInspector decimalStats = new DecimalColumnStatsDataInspector();
+        statsData.setDecimalStats(decimalStats);
+        statsObj.setStatsData(statsData);
+      } else if (s.equalsIgnoreCase("date")) {
+        DateColumnStatsDataInspector dateStats = new DateColumnStatsDataInspector();
+        statsData.setDateStats(dateStats);
+        statsObj.setStatsData(statsData);
+      }
+    } else {
+      // invoke the right unpack method depending on data type of the column
+      if (statsObj.getStatsData().isSetBooleanStats()) {
+        unpackBooleanStats(oi, o, fieldName, statsObj);
+      } else if (statsObj.getStatsData().isSetLongStats()) {
+        unpackLongStats(oi, o, fieldName, statsObj);
+      } else if (statsObj.getStatsData().isSetDoubleStats()) {
+        unpackDoubleStats(oi, o, fieldName, statsObj);
+      } else if (statsObj.getStatsData().isSetStringStats()) {
+        unpackStringStats(oi, o, fieldName, statsObj);
+      } else if (statsObj.getStatsData().isSetBinaryStats()) {
+        unpackBinaryStats(oi, o, fieldName, statsObj);
+      } else if (statsObj.getStatsData().isSetDecimalStats()) {
+        unpackDecimalStats(oi, o, fieldName, statsObj);
+      } else if (statsObj.getStatsData().isSetDateStats()) {
+        unpackDateStats(oi, o, fieldName, statsObj);
+      }
+    }
+  }
+
+  private static void unpackStructObject(ObjectInspector oi, Object o, String fName, ColumnStatisticsObj cStatsObj) throws UnsupportedDoubleException {
+    if (oi.getCategory() != ObjectInspector.Category.STRUCT) {
+      throw new RuntimeException("Invalid object datatype : " + oi.getCategory().toString());
+    }
+
+    StructObjectInspector soi = (StructObjectInspector) oi;
+    List<? extends StructField> fields = soi.getAllStructFieldRefs();
+    List<Object> list = soi.getStructFieldsDataAsList(o);
+
+    for (int i = 0; i < fields.size(); i++) {
+      // Get the field objectInspector, fieldName and the field object.
+      ObjectInspector foi = fields.get(i).getFieldObjectInspector();
+      Object f = (list == null ? null : list.get(i));
+      String fieldName = fields.get(i).getFieldName();
+
+      if (foi.getCategory() == ObjectInspector.Category.PRIMITIVE) {
+        unpackPrimitiveObject(foi, f, fieldName, cStatsObj);
+      } else {
+        unpackStructObject(foi, f, fieldName, cStatsObj);
+      }
+    }
+  }
+}
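
The translator's dispatch is purely name-based: the "columntype" field selects the ColumnStatisticsData variant, and every later primitive field ("countnulls", "min", "max", "numdistinctvalues", "ndvbitvector", ...) is matched against the setter it feeds. A stripped-down sketch of that dispatch, using plain maps in place of object inspectors (the field names are the ones matched above; everything else is illustrative):

    import java.util.LinkedHashMap;
    import java.util.Map;

    public class NameDispatchSketch {
      public static void main(String[] args) {
        Map<String, Object> struct = new LinkedHashMap<>();
        struct.put("columntype", "long");
        struct.put("min", 1L);
        struct.put("max", 99L);
        struct.put("countnulls", 0L);
        struct.put("numdistinctvalues", 42L);

        for (Map.Entry<String, Object> e : struct.entrySet()) {
          switch (e.getKey()) {
            case "columntype":
              System.out.println("select stats variant: " + e.getValue()); break;
            case "min":
              System.out.println("lowValue  = " + e.getValue()); break;
            case "max":
              System.out.println("highValue = " + e.getValue()); break;
            case "countnulls":
              System.out.println("numNulls  = " + e.getValue()); break;
            case "numdistinctvalues":
              System.out.println("numDVs    = " + e.getValue()); break;
            default:
              break; // ndvbitvector etc. would be handled analogously
          }
        }
      }
    }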

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/java/org/apache/hadoop/hive/ql/stats/IStatsProcessor.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/stats/IStatsProcessor.java b/ql/src/java/org/apache/hadoop/hive/ql/stats/IStatsProcessor.java
new file mode 100644
index 0000000..04219b5
--- /dev/null
+++ b/ql/src/java/org/apache/hadoop/hive/ql/stats/IStatsProcessor.java
@@ -0,0 +1,36 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.ql.stats;
+
+import java.util.Collection;
+
+import org.apache.hadoop.hive.ql.CompilationOpContext;
+import org.apache.hadoop.hive.ql.metadata.Hive;
+import org.apache.hadoop.hive.ql.metadata.Partition;
+import org.apache.hadoop.hive.ql.metadata.Table;
+
+public interface IStatsProcessor {
+
+  void initialize(CompilationOpContext opContext);
+
+  int process(Hive db, Table tbl) throws Exception;
+
+  void setDpPartSpecs(Collection<Partition> dpPartSpecs);
+
+}
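
The interface keeps the contract of the merged stats task small. A hypothetical no-op implementation (not part of the patch) shows the full surface a new processor has to cover:

    import java.util.Collection;

    import org.apache.hadoop.hive.ql.CompilationOpContext;
    import org.apache.hadoop.hive.ql.metadata.Hive;
    import org.apache.hadoop.hive.ql.metadata.Partition;
    import org.apache.hadoop.hive.ql.metadata.Table;
    import org.apache.hadoop.hive.ql.stats.IStatsProcessor;

    public class NoOpStatsProcessor implements IStatsProcessor {
      @Override
      public void initialize(CompilationOpContext opContext) {
        // nothing to set up
      }

      @Override
      public int process(Hive db, Table tbl) throws Exception {
        return 0; // 0 signals success, matching the task's return convention
      }

      @Override
      public void setDpPartSpecs(Collection<Partition> dpPartSpecs) {
        // not interested in dynamic-partition specs
      }
    }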

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/java/org/apache/hadoop/hive/ql/stats/Partish.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/stats/Partish.java b/ql/src/java/org/apache/hadoop/hive/ql/stats/Partish.java
new file mode 100644
index 0000000..e8d3184
--- /dev/null
+++ b/ql/src/java/org/apache/hadoop/hive/ql/stats/Partish.java
@@ -0,0 +1,186 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.ql.stats;
+
+import java.util.Map;
+
+import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
+import org.apache.hadoop.hive.ql.io.AcidUtils;
+import org.apache.hadoop.hive.ql.metadata.HiveException;
+import org.apache.hadoop.hive.ql.metadata.Partition;
+import org.apache.hadoop.hive.ql.metadata.Table;
+import org.apache.hadoop.mapred.InputFormat;
+import org.apache.hadoop.mapred.OutputFormat;
+
+/**
+ * Cover class that makes it easier to apply modifications to partitions and tables uniformly.
+ */
+public abstract class Partish {
+
+  public static Partish buildFor(Table table) {
+    return new PTable(table);
+  }
+
+  public static Partish buildFor(Partition part) {
+    return new PPart(part.getTable(), part);
+  }
+
+  public static Partish buildFor(Table table, Partition part) {
+    return new PPart(table, part);
+  }
+
+  // rename
+  @Deprecated
+  public final boolean isAcid() {
+    return AcidUtils.isFullAcidTable(getTable());
+  }
+
+  public abstract Table getTable();
+
+  public abstract Map<String, String> getPartParameters();
+
+  public abstract StorageDescriptor getPartSd();
+
+  public abstract Object getOutput() throws HiveException;
+
+  public abstract Partition getPartition();
+
+  public abstract Class<? extends InputFormat> getInputFormatClass() throws HiveException;
+
+  public abstract Class<? extends OutputFormat> getOutputFormatClass() throws HiveException;
+
+  public abstract String getLocation();
+
+  public abstract String getSimpleName();
+
+  public final String getPartishType() {
+    return getClass().getSimpleName();
+  }
+
+  static class PTable extends Partish {
+    private Table table;
+
+    public PTable(Table table) {
+      this.table = table;
+    }
+
+    @Override
+    public Table getTable() {
+      return table;
+    }
+
+    @Override
+    public Map<String, String> getPartParameters() {
+      return table.getTTable().getParameters();
+    }
+
+    @Override
+    public StorageDescriptor getPartSd() {
+      return table.getTTable().getSd();
+    }
+
+    @Override
+    public Object getOutput() throws HiveException {
+      return new Table(getTable().getTTable());
+    }
+
+    @Override
+    public Partition getPartition() {
+      return null;
+    }
+
+    @Override
+    public Class<? extends InputFormat> getInputFormatClass() {
+      return table.getInputFormatClass();
+    }
+
+    @Override
+    public Class<? extends OutputFormat> getOutputFormatClass() {
+      return table.getOutputFormatClass();
+    }
+
+    @Override
+    public String getLocation() {
+      return table.getDataLocation().toString();
+    }
+
+    @Override
+    public String getSimpleName() {
+      return String.format("Table %s.%s", table.getDbName(), table.getTableName());
+    }
+  }
+
+  static class PPart extends Partish {
+    private Table table;
+    private Partition partition;
+
+    // FIXME: possibly the distinction between table/partition is not needed; however it was like this before... will change it later
+    public PPart(Table table, Partition partition) {
+      this.table = table;
+      this.partition = partition;
+    }
+
+    @Override
+    public Table getTable() {
+      return table;
+    }
+
+    @Override
+    public Map<String, String> getPartParameters() {
+      return partition.getTPartition().getParameters();
+    }
+
+    @Override
+    public StorageDescriptor getPartSd() {
+      return partition.getTPartition().getSd();
+    }
+
+    @Override
+    public Object getOutput() throws HiveException {
+      return new Partition(table, partition.getTPartition());
+    }
+
+    @Override
+    public Partition getPartition() {
+      return partition;
+    }
+
+    @Override
+    public Class<? extends InputFormat> getInputFormatClass() throws HiveException {
+      return partition.getInputFormatClass();
+    }
+
+    @Override
+    public Class<? extends OutputFormat> getOutputFormatClass() throws HiveException {
+      return partition.getOutputFormatClass();
+    }
+
+    @Override
+    public String getLocation() {
+      return partition.getLocation();
+    }
+
+    @Override
+    public String getSimpleName() {
+      return String.format("Partition %s.%s %s", table.getDbName(), table.getTableName(), partition.getSpec());
+    }
+
+  }
+
+}
\ No newline at end of file
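
Partish lets callers touch basic stats without branching on table vs. partition; a small usage sketch (method and parameter names outside the class above are illustrative):

    import org.apache.hadoop.hive.ql.metadata.Partition;
    import org.apache.hadoop.hive.ql.metadata.Table;
    import org.apache.hadoop.hive.ql.stats.Partish;

    public class PartishExample {
      // Sketch only: table/part would come from the metastore in real code.
      static void touchBasicStats(Table table, Partition part) {
        Partish p = (part == null) ? Partish.buildFor(table)
                                   : Partish.buildFor(table, part);
        // One code path for both cases: basic stats live in the parameters map.
        p.getPartParameters().put("numRows", "0");
        System.out.println("updated " + p.getSimpleName());
      }
    }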

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands.java b/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands.java
index 2c76f79..149a9ad 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands.java
@@ -835,4 +835,4 @@ public class TestTxnCommands extends TxnCommandsBaseForTests {
     int[][] expected = {{0, -1},{0, -1}, {1, -1}, {1, -1}, {2, -1}, {2, -1}, {3, -1}, {3, -1}};
     Assert.assertEquals(stringifyValues(expected), r);
   }
-}
\ No newline at end of file
+}

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands2.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands2.java b/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands2.java
index 2faf098..b877253 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands2.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands2.java
@@ -132,6 +132,7 @@ public class TestTxnCommands2 {
         .setVar(HiveConf.ConfVars.HIVE_AUTHORIZATION_MANAGER,
             "org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactory");
     hiveConf.setBoolVar(HiveConf.ConfVars.MERGE_CARDINALITY_VIOLATION_CHECK, true);
+    hiveConf.setBoolVar(HiveConf.ConfVars.HIVESTATSCOLAUTOGATHER, false);
 
     TxnDbUtil.setConfValues(hiveConf);
     TxnDbUtil.prepDb(hiveConf);

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/org/apache/hadoop/hive/ql/TxnCommandsBaseForTests.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/TxnCommandsBaseForTests.java b/ql/src/test/org/apache/hadoop/hive/ql/TxnCommandsBaseForTests.java
index 3e4f6f6..8737369 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/TxnCommandsBaseForTests.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/TxnCommandsBaseForTests.java
@@ -80,6 +80,7 @@ public abstract class TxnCommandsBaseForTests {
       .setVar(HiveConf.ConfVars.HIVE_AUTHORIZATION_MANAGER,
         "org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactory");
     hiveConf.setBoolVar(HiveConf.ConfVars.MERGE_CARDINALITY_VIOLATION_CHECK, true);
+    hiveConf.setBoolVar(HiveConf.ConfVars.HIVESTATSCOLAUTOGATHER, false);
     TxnDbUtil.setConfValues(hiveConf);
     TxnDbUtil.prepDb(hiveConf);
     File f = new File(getWarehouseDir());

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/org/apache/hadoop/hive/ql/optimizer/calcite/rules/TestHiveReduceExpressionsWithStatsRule.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/optimizer/calcite/rules/TestHiveReduceExpressionsWithStatsRule.java b/ql/src/test/org/apache/hadoop/hive/ql/optimizer/calcite/rules/TestHiveReduceExpressionsWithStatsRule.java
index d0a9982..4c865e0 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/optimizer/calcite/rules/TestHiveReduceExpressionsWithStatsRule.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/optimizer/calcite/rules/TestHiveReduceExpressionsWithStatsRule.java
@@ -92,7 +92,7 @@ public class TestHiveReduceExpressionsWithStatsRule {
 
     builder = HiveRelFactories.HIVE_BUILDER.create(optCluster, schemaMock);
 
-    StatsSetupConst.setStatsStateForCreateTable(tableParams, Lists.newArrayList("_int"), "TRUE");
+    StatsSetupConst.setStatsStateForCreateTable(tableParams, Lists.newArrayList("_int"), StatsSetupConst.TRUE);
     tableParams.put(StatsSetupConst.ROW_COUNT, "3");
 
   }

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/queries/clientpositive/autoColumnStats_1.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/autoColumnStats_1.q b/ql/src/test/queries/clientpositive/autoColumnStats_1.q
index 7955b07..cc32393 100644
--- a/ql/src/test/queries/clientpositive/autoColumnStats_1.q
+++ b/ql/src/test/queries/clientpositive/autoColumnStats_1.q
@@ -60,6 +60,8 @@ drop table nzhang_part14;
 create table if not exists nzhang_part14 (key string)
   partitioned by (value string);
 
+desc formatted nzhang_part14;
+
 insert overwrite table nzhang_part14 partition(value) 
 select key, value from (
   select * from (select 'k1' as key, cast(null as string) as value from src limit 2)a 
@@ -69,6 +71,8 @@ select key, value from (
   select * from (select 'k3' as key, ' ' as value from src limit 2)c
 ) T;
 
+desc formatted nzhang_part14 partition (value=' ');
+
 explain select key from nzhang_part14;
 
 

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/queries/clientpositive/autoColumnStats_10.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/autoColumnStats_10.q b/ql/src/test/queries/clientpositive/autoColumnStats_10.q
new file mode 100644
index 0000000..bf166d8
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/autoColumnStats_10.q
@@ -0,0 +1,52 @@
+set hive.mapred.mode=nonstrict;
+set hive.stats.column.autogather=true;
+
+drop table p;
+
+CREATE TABLE p(insert_num int, c1 tinyint, c2 smallint);
+
+desc formatted p;
+
+insert into p values (1,22,333);
+
+desc formatted p;
+
+alter table p replace columns (insert_num int, c1 STRING, c2 STRING);
+
+desc formatted p;
+
+desc formatted p insert_num;
+desc formatted p c1;
+
+insert into p values (2,11,111);
+
+desc formatted p;
+
+desc formatted p insert_num;
+desc formatted p c1;
+
+set hive.stats.column.autogather=false;
+
+drop table p;
+
+CREATE TABLE p(insert_num int, c1 tinyint, c2 smallint);
+
+desc formatted p;
+
+insert into p values (1,22,333);
+
+desc formatted p;
+
+alter table p replace columns (insert_num int, c1 STRING, c2 STRING);
+
+desc formatted p;
+
+desc formatted p insert_num;
+desc formatted p c1;
+
+insert into p values (2,11,111);
+
+desc formatted p;
+
+desc formatted p insert_num;
+desc formatted p c1;

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/queries/clientpositive/autoColumnStats_5a.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/autoColumnStats_5a.q b/ql/src/test/queries/clientpositive/autoColumnStats_5a.q
new file mode 100644
index 0000000..a8bce18
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/autoColumnStats_5a.q
@@ -0,0 +1,30 @@
+set hive.stats.column.autogather=true;
+set hive.mapred.mode=nonstrict;
+set hive.cli.print.header=true;
+SET hive.exec.schema.evolution=true;
+SET hive.vectorized.execution.enabled=false;
+set hive.fetch.task.conversion=none;
+set hive.exec.dynamic.partition.mode=nonstrict;
+
+-- SORT_QUERY_RESULTS
+
+CREATE TABLE partitioned1(a INT, b STRING) PARTITIONED BY(part INT) STORED AS TEXTFILE;
+
+explain extended 
+insert into table partitioned1 partition(part=1) values(1, 'original');
+
+insert into table partitioned1 partition(part=1) values(1, 'original');
+
+desc formatted partitioned1 partition(part=1);
+
+explain extended 
+insert into table partitioned1 partition(part=1) values(2, 'original'), (3, 'original'),(4, 'original');
+
+insert into table partitioned1 partition(part=1) values(2, 'original'), (3, 'original'),(4, 'original');
+
+explain insert into table partitioned1 partition(part=1) values(1, 'original'),(2, 'original'), (3, 'original'),(4, 'original');
+
+desc formatted partitioned1;
+desc formatted partitioned1 partition(part=1);
+desc formatted partitioned1 partition(part=1) a;
+

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/queries/clientpositive/basicstat_partval.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/basicstat_partval.q b/ql/src/test/queries/clientpositive/basicstat_partval.q
new file mode 100644
index 0000000..2db472d
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/basicstat_partval.q
@@ -0,0 +1,12 @@
+set hive.stats.autogather=true;
+
+CREATE TABLE p1(i int) partitioned by (p string);
+
+insert into p1 partition(p='a') values (1);
+insert into p1 partition(p='A') values (2),(3);
+
+describe formatted p1;
+describe formatted p1 partition(p='a');
+describe formatted p1 partition(p='A');
+
+

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/queries/clientpositive/columnstats_partlvl.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/columnstats_partlvl.q b/ql/src/test/queries/clientpositive/columnstats_partlvl.q
index 2c92dfe..4283bca 100644
--- a/ql/src/test/queries/clientpositive/columnstats_partlvl.q
+++ b/ql/src/test/queries/clientpositive/columnstats_partlvl.q
@@ -14,6 +14,8 @@ explain extended
 analyze table Employee_Part partition (employeeSalary=2000.0) compute statistics for columns employeeID;
 analyze table Employee_Part partition (employeeSalary=2000.0) compute statistics for columns employeeID;
 
+describe formatted Employee_Part partition(employeeSalary=2000.0);
+
 explain 
 analyze table Employee_Part partition (employeeSalary=4000.0) compute statistics for columns employeeID;
 explain extended

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/queries/clientpositive/columnstats_partlvl_dp.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/columnstats_partlvl_dp.q b/ql/src/test/queries/clientpositive/columnstats_partlvl_dp.q
index ead9a2d..c065edd 100644
--- a/ql/src/test/queries/clientpositive/columnstats_partlvl_dp.q
+++ b/ql/src/test/queries/clientpositive/columnstats_partlvl_dp.q
@@ -16,6 +16,8 @@ explain
 analyze table Employee_Part partition (employeeSalary='4000.0', country) compute statistics for columns employeeName, employeeID;
 analyze table Employee_Part partition (employeeSalary='4000.0', country) compute statistics for columns employeeName, employeeID;
 
+describe formatted Employee_Part partition (employeeSalary='4000.0', country='USA');
+
 describe formatted Employee_Part partition (employeeSalary='4000.0', country='USA') employeeName;
 
 -- don't specify all partitioning keys

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/queries/clientpositive/deleteAnalyze.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/deleteAnalyze.q b/ql/src/test/queries/clientpositive/deleteAnalyze.q
index 26123a6..5293ddf 100644
--- a/ql/src/test/queries/clientpositive/deleteAnalyze.q
+++ b/ql/src/test/queries/clientpositive/deleteAnalyze.q
@@ -20,6 +20,8 @@ describe formatted testdeci2 amount;
 
 analyze table testdeci2 compute statistics for columns;
 
+describe formatted testdeci2;
+
 set hive.stats.fetch.column.stats=true;
 
 analyze table testdeci2 compute statistics for columns;

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/queries/clientpositive/exec_parallel_column_stats.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/exec_parallel_column_stats.q b/ql/src/test/queries/clientpositive/exec_parallel_column_stats.q
index ceacc24..a89b707 100644
--- a/ql/src/test/queries/clientpositive/exec_parallel_column_stats.q
+++ b/ql/src/test/queries/clientpositive/exec_parallel_column_stats.q
@@ -1,5 +1,7 @@
 set hive.exec.parallel=true;
 
-explain analyze table src compute statistics for columns;
+create table t as select * from src;
 
-analyze table src compute statistics for columns;
\ No newline at end of file
+explain analyze table t compute statistics for columns;
+
+analyze table t compute statistics for columns;

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/queries/clientpositive/outer_reference_windowed.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/outer_reference_windowed.q b/ql/src/test/queries/clientpositive/outer_reference_windowed.q
index cac6b41..3259ebe 100644
--- a/ql/src/test/queries/clientpositive/outer_reference_windowed.q
+++ b/ql/src/test/queries/clientpositive/outer_reference_windowed.q
@@ -34,6 +34,8 @@ ANALYZE TABLE e011_03 COMPUTE STATISTICS FOR COLUMNS;
 
 set hive.explain.user=false;
 
+describe formatted e011_01;
+
 explain select sum(sum(c1)) over() from e011_01;
 select sum(sum(c1)) over() from e011_01;
 

http://git-wip-us.apache.org/repos/asf/hive/blob/ec9cc0bc/ql/src/test/queries/clientpositive/smb_mapjoin_1.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/smb_mapjoin_1.q b/ql/src/test/queries/clientpositive/smb_mapjoin_1.q
index baf1690..b2394ad 100644
--- a/ql/src/test/queries/clientpositive/smb_mapjoin_1.q
+++ b/ql/src/test/queries/clientpositive/smb_mapjoin_1.q
@@ -12,6 +12,9 @@ load data local inpath '../../data/files/smbbucket_1.rc' overwrite into table sm
 load data local inpath '../../data/files/smbbucket_2.rc' overwrite into table smb_bucket_2;
 load data local inpath '../../data/files/smbbucket_3.rc' overwrite into table smb_bucket_3;
 
+desc formatted smb_bucket_1;
+select count(*) from smb_bucket_1;
+
 set hive.cbo.enable=false;
 set hive.optimize.bucketmapjoin = true;
 set hive.optimize.bucketmapjoin.sortedmerge = true;