Posted to commits@hive.apache.org by px...@apache.org on 2016/05/09 17:41:50 UTC

[08/21] hive git commit: HIVE-13341: Stats state is not captured correctly: differentiate load table and create table (Pengcheng Xiong, reviewed by Ashutosh Chauhan)
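
For context: with this patch, CREATE TABLE records accurate basic stats (all zeros) for the new empty table, while loading files into a table leaves the stats state unverified; the golden-file updates below follow from that. A minimal HiveQL sketch of the new behavior (table and column names here are illustrative, not taken from the patch):

    -- Creating an empty table now records exact basic stats in the Metastore.
    CREATE TABLE t (id INT, s STRING);
    DESCRIBE FORMATTED t;
    -- Table Parameters now include (the .q.out files show the JSON escaped):
    --   COLUMN_STATS_ACCURATE  {"BASIC_STATS":"true"}
    --   numFiles=0  numRows=0  rawDataSize=0  totalSize=0

    -- LOAD DATA bypasses stats collection, so accuracy is no longer assumed;
    -- subsequent plans fall back to size-based row estimates.
    LOAD DATA LOCAL INPATH '/tmp/t.txt' INTO TABLE t;

In the EXPLAIN diffs below, the Statistics lines change (e.g. Num rows: 10 -> 4) presumably because row counts are now carried through from maintained basic stats rather than estimated from raw data size.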

http://git-wip-us.apache.org/repos/asf/hive/blob/244ce09c/ql/src/test/results/clientpositive/schema_evol_text_vecrow_mapwork_table.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/schema_evol_text_vecrow_mapwork_table.q.out b/ql/src/test/results/clientpositive/schema_evol_text_vecrow_mapwork_table.q.out
index 50790f4..561c128 100644
--- a/ql/src/test/results/clientpositive/schema_evol_text_vecrow_mapwork_table.q.out
+++ b/ql/src/test/results/clientpositive/schema_evol_text_vecrow_mapwork_table.q.out
@@ -52,6 +52,11 @@ Retention:          	0
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
 Table Parameters:	 	 
+	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\"}
+	numFiles            	0                   
+	numRows             	0                   
+	rawDataSize         	0                   
+	totalSize           	0                   
 #### A masked pattern was here ####
 	 	 
 # Storage Information	 	 
@@ -187,25 +192,25 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: table_add_int_permute_select
-            Statistics: Num rows: 10 Data size: 111 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 4 Data size: 48 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: insert_num (type: int), a (type: int), b (type: string)
               outputColumnNames: _col0, _col1, _col2
-              Statistics: Num rows: 10 Data size: 111 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 4 Data size: 48 Basic stats: COMPLETE Column stats: NONE
               Reduce Output Operator
                 key expressions: _col0 (type: int)
                 sort order: +
-                Statistics: Num rows: 10 Data size: 111 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 4 Data size: 48 Basic stats: COMPLETE Column stats: NONE
                 value expressions: _col1 (type: int), _col2 (type: string)
       Execution mode: vectorized
       Reduce Operator Tree:
         Select Operator
           expressions: KEY.reducesinkkey0 (type: int), VALUE._col0 (type: int), VALUE._col1 (type: string)
           outputColumnNames: _col0, _col1, _col2
-          Statistics: Num rows: 10 Data size: 111 Basic stats: COMPLETE Column stats: NONE
+          Statistics: Num rows: 4 Data size: 48 Basic stats: COMPLETE Column stats: NONE
           File Output Operator
             compressed: false
-            Statistics: Num rows: 10 Data size: 111 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 4 Data size: 48 Basic stats: COMPLETE Column stats: NONE
             table:
                 input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                 output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@@ -318,6 +323,11 @@ Retention:          	0
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
 Table Parameters:	 	 
+	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\"}
+	numFiles            	0                   
+	numRows             	0                   
+	rawDataSize         	0                   
+	totalSize           	0                   
 #### A masked pattern was here ####
 	 	 
 # Storage Information	 	 
@@ -456,25 +466,25 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: table_add_int_string_permute_select
-            Statistics: Num rows: 10 Data size: 155 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 4 Data size: 48 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: insert_num (type: int), a (type: int), b (type: string)
               outputColumnNames: _col0, _col1, _col2
-              Statistics: Num rows: 10 Data size: 155 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 4 Data size: 48 Basic stats: COMPLETE Column stats: NONE
               Reduce Output Operator
                 key expressions: _col0 (type: int)
                 sort order: +
-                Statistics: Num rows: 10 Data size: 155 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 4 Data size: 48 Basic stats: COMPLETE Column stats: NONE
                 value expressions: _col1 (type: int), _col2 (type: string)
       Execution mode: vectorized
       Reduce Operator Tree:
         Select Operator
           expressions: KEY.reducesinkkey0 (type: int), VALUE._col0 (type: int), VALUE._col1 (type: string)
           outputColumnNames: _col0, _col1, _col2
-          Statistics: Num rows: 10 Data size: 155 Basic stats: COMPLETE Column stats: NONE
+          Statistics: Num rows: 4 Data size: 48 Basic stats: COMPLETE Column stats: NONE
           File Output Operator
             compressed: false
-            Statistics: Num rows: 10 Data size: 155 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 4 Data size: 48 Basic stats: COMPLETE Column stats: NONE
             table:
                 input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                 output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@@ -740,25 +750,25 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: table_change_string_group_double
-            Statistics: Num rows: 10 Data size: 383 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 4 Data size: 151 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: insert_num (type: int), c1 (type: double), c2 (type: double), c3 (type: double), b (type: string)
               outputColumnNames: _col0, _col1, _col2, _col3, _col4
-              Statistics: Num rows: 10 Data size: 383 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 4 Data size: 151 Basic stats: COMPLETE Column stats: NONE
               Reduce Output Operator
                 key expressions: _col0 (type: int)
                 sort order: +
-                Statistics: Num rows: 10 Data size: 383 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 4 Data size: 151 Basic stats: COMPLETE Column stats: NONE
                 value expressions: _col1 (type: double), _col2 (type: double), _col3 (type: double), _col4 (type: string)
       Execution mode: vectorized
       Reduce Operator Tree:
         Select Operator
           expressions: KEY.reducesinkkey0 (type: int), VALUE._col0 (type: double), VALUE._col1 (type: double), VALUE._col2 (type: double), VALUE._col3 (type: string)
           outputColumnNames: _col0, _col1, _col2, _col3, _col4
-          Statistics: Num rows: 10 Data size: 383 Basic stats: COMPLETE Column stats: NONE
+          Statistics: Num rows: 4 Data size: 151 Basic stats: COMPLETE Column stats: NONE
           File Output Operator
             compressed: false
-            Statistics: Num rows: 10 Data size: 383 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 4 Data size: 151 Basic stats: COMPLETE Column stats: NONE
             table:
                 input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                 output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@@ -912,25 +922,25 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: table_change_date_group_string_group_timestamp
-            Statistics: Num rows: 9 Data size: 1194 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 3 Data size: 450 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: insert_num (type: int), c1 (type: string), c2 (type: char(50)), c3 (type: char(15)), c4 (type: varchar(50)), c5 (type: varchar(15)), b (type: string)
               outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6
-              Statistics: Num rows: 9 Data size: 1194 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 3 Data size: 450 Basic stats: COMPLETE Column stats: NONE
               Reduce Output Operator
                 key expressions: _col0 (type: int)
                 sort order: +
-                Statistics: Num rows: 9 Data size: 1194 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 3 Data size: 450 Basic stats: COMPLETE Column stats: NONE
                 value expressions: _col1 (type: string), _col2 (type: char(50)), _col3 (type: char(15)), _col4 (type: varchar(50)), _col5 (type: varchar(15)), _col6 (type: string)
       Execution mode: vectorized
       Reduce Operator Tree:
         Select Operator
           expressions: KEY.reducesinkkey0 (type: int), VALUE._col0 (type: string), VALUE._col1 (type: char(50)), VALUE._col2 (type: char(15)), VALUE._col3 (type: varchar(50)), VALUE._col4 (type: varchar(15)), VALUE._col5 (type: string)
           outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6
-          Statistics: Num rows: 9 Data size: 1194 Basic stats: COMPLETE Column stats: NONE
+          Statistics: Num rows: 3 Data size: 450 Basic stats: COMPLETE Column stats: NONE
           File Output Operator
             compressed: false
-            Statistics: Num rows: 9 Data size: 1194 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 3 Data size: 450 Basic stats: COMPLETE Column stats: NONE
             table:
                 input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                 output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@@ -1100,25 +1110,25 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: table_change_date_group_string_group_date
-            Statistics: Num rows: 9 Data size: 555 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 3 Data size: 195 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: insert_num (type: int), c1 (type: string), c2 (type: char(50)), c3 (type: char(15)), c4 (type: varchar(50)), c5 (type: varchar(15)), b (type: string)
               outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6
-              Statistics: Num rows: 9 Data size: 555 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 3 Data size: 195 Basic stats: COMPLETE Column stats: NONE
               Reduce Output Operator
                 key expressions: _col0 (type: int)
                 sort order: +
-                Statistics: Num rows: 9 Data size: 555 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 3 Data size: 195 Basic stats: COMPLETE Column stats: NONE
                 value expressions: _col1 (type: string), _col2 (type: char(50)), _col3 (type: char(15)), _col4 (type: varchar(50)), _col5 (type: varchar(15)), _col6 (type: string)
       Execution mode: vectorized
       Reduce Operator Tree:
         Select Operator
           expressions: KEY.reducesinkkey0 (type: int), VALUE._col0 (type: string), VALUE._col1 (type: char(50)), VALUE._col2 (type: char(15)), VALUE._col3 (type: varchar(50)), VALUE._col4 (type: varchar(15)), VALUE._col5 (type: string)
           outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6
-          Statistics: Num rows: 9 Data size: 555 Basic stats: COMPLETE Column stats: NONE
+          Statistics: Num rows: 3 Data size: 195 Basic stats: COMPLETE Column stats: NONE
           File Output Operator
             compressed: false
-            Statistics: Num rows: 9 Data size: 555 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 3 Data size: 195 Basic stats: COMPLETE Column stats: NONE
             table:
                 input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                 output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@@ -1268,25 +1278,25 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: table_change_numeric_group_string_group_multi_ints_string
-            Statistics: Num rows: 10 Data size: 330 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 4 Data size: 150 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: insert_num (type: int), c1 (type: string), c2 (type: string), c3 (type: string), c4 (type: string), b (type: string)
               outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
-              Statistics: Num rows: 10 Data size: 330 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 4 Data size: 150 Basic stats: COMPLETE Column stats: NONE
               Reduce Output Operator
                 key expressions: _col0 (type: int)
                 sort order: +
-                Statistics: Num rows: 10 Data size: 330 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 4 Data size: 150 Basic stats: COMPLETE Column stats: NONE
                 value expressions: _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: string)
       Execution mode: vectorized
       Reduce Operator Tree:
         Select Operator
           expressions: KEY.reducesinkkey0 (type: int), VALUE._col0 (type: string), VALUE._col1 (type: string), VALUE._col2 (type: string), VALUE._col3 (type: string), VALUE._col4 (type: string)
           outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
-          Statistics: Num rows: 10 Data size: 330 Basic stats: COMPLETE Column stats: NONE
+          Statistics: Num rows: 4 Data size: 150 Basic stats: COMPLETE Column stats: NONE
           File Output Operator
             compressed: false
-            Statistics: Num rows: 10 Data size: 330 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 4 Data size: 150 Basic stats: COMPLETE Column stats: NONE
             table:
                 input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                 output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@@ -1429,25 +1439,25 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: table_change_numeric_group_string_group_multi_ints_char
-            Statistics: Num rows: 10 Data size: 330 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 4 Data size: 150 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: insert_num (type: int), c1 (type: char(50)), c2 (type: char(50)), c3 (type: char(50)), c4 (type: char(50)), b (type: string)
               outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
-              Statistics: Num rows: 10 Data size: 330 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 4 Data size: 150 Basic stats: COMPLETE Column stats: NONE
               Reduce Output Operator
                 key expressions: _col0 (type: int)
                 sort order: +
-                Statistics: Num rows: 10 Data size: 330 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 4 Data size: 150 Basic stats: COMPLETE Column stats: NONE
                 value expressions: _col1 (type: char(50)), _col2 (type: char(50)), _col3 (type: char(50)), _col4 (type: char(50)), _col5 (type: string)
       Execution mode: vectorized
       Reduce Operator Tree:
         Select Operator
           expressions: KEY.reducesinkkey0 (type: int), VALUE._col0 (type: char(50)), VALUE._col1 (type: char(50)), VALUE._col2 (type: char(50)), VALUE._col3 (type: char(50)), VALUE._col4 (type: string)
           outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
-          Statistics: Num rows: 10 Data size: 330 Basic stats: COMPLETE Column stats: NONE
+          Statistics: Num rows: 4 Data size: 150 Basic stats: COMPLETE Column stats: NONE
           File Output Operator
             compressed: false
-            Statistics: Num rows: 10 Data size: 330 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 4 Data size: 150 Basic stats: COMPLETE Column stats: NONE
             table:
                 input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                 output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@@ -1590,25 +1600,25 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: table_change_numeric_group_string_group_multi_ints_char_trunc
-            Statistics: Num rows: 10 Data size: 304 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 4 Data size: 150 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: insert_num (type: int), c1 (type: char(5)), c2 (type: char(5)), c3 (type: char(5)), c4 (type: char(5)), b (type: string)
               outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
-              Statistics: Num rows: 10 Data size: 304 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 4 Data size: 150 Basic stats: COMPLETE Column stats: NONE
               Reduce Output Operator
                 key expressions: _col0 (type: int)
                 sort order: +
-                Statistics: Num rows: 10 Data size: 304 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 4 Data size: 150 Basic stats: COMPLETE Column stats: NONE
                 value expressions: _col1 (type: char(5)), _col2 (type: char(5)), _col3 (type: char(5)), _col4 (type: char(5)), _col5 (type: string)
       Execution mode: vectorized
       Reduce Operator Tree:
         Select Operator
           expressions: KEY.reducesinkkey0 (type: int), VALUE._col0 (type: char(5)), VALUE._col1 (type: char(5)), VALUE._col2 (type: char(5)), VALUE._col3 (type: char(5)), VALUE._col4 (type: string)
           outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
-          Statistics: Num rows: 10 Data size: 304 Basic stats: COMPLETE Column stats: NONE
+          Statistics: Num rows: 4 Data size: 150 Basic stats: COMPLETE Column stats: NONE
           File Output Operator
             compressed: false
-            Statistics: Num rows: 10 Data size: 304 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 4 Data size: 150 Basic stats: COMPLETE Column stats: NONE
             table:
                 input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                 output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@@ -1751,25 +1761,25 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: table_change_numeric_group_string_group_multi_ints_varchar
-            Statistics: Num rows: 10 Data size: 330 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 4 Data size: 150 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: insert_num (type: int), c1 (type: varchar(50)), c2 (type: varchar(50)), c3 (type: varchar(50)), c4 (type: varchar(50)), b (type: string)
               outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
-              Statistics: Num rows: 10 Data size: 330 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 4 Data size: 150 Basic stats: COMPLETE Column stats: NONE
               Reduce Output Operator
                 key expressions: _col0 (type: int)
                 sort order: +
-                Statistics: Num rows: 10 Data size: 330 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 4 Data size: 150 Basic stats: COMPLETE Column stats: NONE
                 value expressions: _col1 (type: varchar(50)), _col2 (type: varchar(50)), _col3 (type: varchar(50)), _col4 (type: varchar(50)), _col5 (type: string)
       Execution mode: vectorized
       Reduce Operator Tree:
         Select Operator
           expressions: KEY.reducesinkkey0 (type: int), VALUE._col0 (type: varchar(50)), VALUE._col1 (type: varchar(50)), VALUE._col2 (type: varchar(50)), VALUE._col3 (type: varchar(50)), VALUE._col4 (type: string)
           outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
-          Statistics: Num rows: 10 Data size: 330 Basic stats: COMPLETE Column stats: NONE
+          Statistics: Num rows: 4 Data size: 150 Basic stats: COMPLETE Column stats: NONE
           File Output Operator
             compressed: false
-            Statistics: Num rows: 10 Data size: 330 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 4 Data size: 150 Basic stats: COMPLETE Column stats: NONE
             table:
                 input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                 output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@@ -1912,25 +1922,25 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: table_change_numeric_group_string_group_multi_ints_varchar_trunc
-            Statistics: Num rows: 10 Data size: 304 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 4 Data size: 150 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: insert_num (type: int), c1 (type: varchar(5)), c2 (type: varchar(5)), c3 (type: varchar(5)), c4 (type: varchar(5)), b (type: string)
               outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
-              Statistics: Num rows: 10 Data size: 304 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 4 Data size: 150 Basic stats: COMPLETE Column stats: NONE
               Reduce Output Operator
                 key expressions: _col0 (type: int)
                 sort order: +
-                Statistics: Num rows: 10 Data size: 304 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 4 Data size: 150 Basic stats: COMPLETE Column stats: NONE
                 value expressions: _col1 (type: varchar(5)), _col2 (type: varchar(5)), _col3 (type: varchar(5)), _col4 (type: varchar(5)), _col5 (type: string)
       Execution mode: vectorized
       Reduce Operator Tree:
         Select Operator
           expressions: KEY.reducesinkkey0 (type: int), VALUE._col0 (type: varchar(5)), VALUE._col1 (type: varchar(5)), VALUE._col2 (type: varchar(5)), VALUE._col3 (type: varchar(5)), VALUE._col4 (type: string)
           outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
-          Statistics: Num rows: 10 Data size: 304 Basic stats: COMPLETE Column stats: NONE
+          Statistics: Num rows: 4 Data size: 150 Basic stats: COMPLETE Column stats: NONE
           File Output Operator
             compressed: false
-            Statistics: Num rows: 10 Data size: 304 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 4 Data size: 150 Basic stats: COMPLETE Column stats: NONE
             table:
                 input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                 output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@@ -2071,25 +2081,25 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: table_change_numeric_group_string_group_floating_string
-            Statistics: Num rows: 10 Data size: 380 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 4 Data size: 215 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: insert_num (type: int), c1 (type: string), c2 (type: string), c3 (type: string), b (type: string)
               outputColumnNames: _col0, _col1, _col2, _col3, _col4
-              Statistics: Num rows: 10 Data size: 380 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 4 Data size: 215 Basic stats: COMPLETE Column stats: NONE
               Reduce Output Operator
                 key expressions: _col0 (type: int)
                 sort order: +
-                Statistics: Num rows: 10 Data size: 380 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 4 Data size: 215 Basic stats: COMPLETE Column stats: NONE
                 value expressions: _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string)
       Execution mode: vectorized
       Reduce Operator Tree:
         Select Operator
           expressions: KEY.reducesinkkey0 (type: int), VALUE._col0 (type: string), VALUE._col1 (type: string), VALUE._col2 (type: string), VALUE._col3 (type: string)
           outputColumnNames: _col0, _col1, _col2, _col3, _col4
-          Statistics: Num rows: 10 Data size: 380 Basic stats: COMPLETE Column stats: NONE
+          Statistics: Num rows: 4 Data size: 215 Basic stats: COMPLETE Column stats: NONE
           File Output Operator
             compressed: false
-            Statistics: Num rows: 10 Data size: 380 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 4 Data size: 215 Basic stats: COMPLETE Column stats: NONE
             table:
                 input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                 output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@@ -2230,25 +2240,25 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: table_change_numeric_group_string_group_floating_char
-            Statistics: Num rows: 10 Data size: 385 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 4 Data size: 220 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: insert_num (type: int), c1 (type: char(50)), c2 (type: char(50)), c3 (type: char(50)), b (type: string)
               outputColumnNames: _col0, _col1, _col2, _col3, _col4
-              Statistics: Num rows: 10 Data size: 385 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 4 Data size: 220 Basic stats: COMPLETE Column stats: NONE
               Reduce Output Operator
                 key expressions: _col0 (type: int)
                 sort order: +
-                Statistics: Num rows: 10 Data size: 385 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 4 Data size: 220 Basic stats: COMPLETE Column stats: NONE
                 value expressions: _col1 (type: char(50)), _col2 (type: char(50)), _col3 (type: char(50)), _col4 (type: string)
       Execution mode: vectorized
       Reduce Operator Tree:
         Select Operator
           expressions: KEY.reducesinkkey0 (type: int), VALUE._col0 (type: char(50)), VALUE._col1 (type: char(50)), VALUE._col2 (type: char(50)), VALUE._col3 (type: string)
           outputColumnNames: _col0, _col1, _col2, _col3, _col4
-          Statistics: Num rows: 10 Data size: 385 Basic stats: COMPLETE Column stats: NONE
+          Statistics: Num rows: 4 Data size: 220 Basic stats: COMPLETE Column stats: NONE
           File Output Operator
             compressed: false
-            Statistics: Num rows: 10 Data size: 385 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 4 Data size: 220 Basic stats: COMPLETE Column stats: NONE
             table:
                 input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                 output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@@ -2389,25 +2399,25 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: table_change_numeric_group_string_group_floating_char_trunc
-            Statistics: Num rows: 10 Data size: 357 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 4 Data size: 215 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: insert_num (type: int), c1 (type: char(7)), c2 (type: char(7)), c3 (type: char(7)), b (type: string)
               outputColumnNames: _col0, _col1, _col2, _col3, _col4
-              Statistics: Num rows: 10 Data size: 357 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 4 Data size: 215 Basic stats: COMPLETE Column stats: NONE
               Reduce Output Operator
                 key expressions: _col0 (type: int)
                 sort order: +
-                Statistics: Num rows: 10 Data size: 357 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 4 Data size: 215 Basic stats: COMPLETE Column stats: NONE
                 value expressions: _col1 (type: char(7)), _col2 (type: char(7)), _col3 (type: char(7)), _col4 (type: string)
       Execution mode: vectorized
       Reduce Operator Tree:
         Select Operator
           expressions: KEY.reducesinkkey0 (type: int), VALUE._col0 (type: char(7)), VALUE._col1 (type: char(7)), VALUE._col2 (type: char(7)), VALUE._col3 (type: string)
           outputColumnNames: _col0, _col1, _col2, _col3, _col4
-          Statistics: Num rows: 10 Data size: 357 Basic stats: COMPLETE Column stats: NONE
+          Statistics: Num rows: 4 Data size: 215 Basic stats: COMPLETE Column stats: NONE
           File Output Operator
             compressed: false
-            Statistics: Num rows: 10 Data size: 357 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 4 Data size: 215 Basic stats: COMPLETE Column stats: NONE
             table:
                 input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                 output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@@ -2548,25 +2558,25 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: table_change_numeric_group_string_group_floating_varchar
-            Statistics: Num rows: 10 Data size: 368 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 4 Data size: 203 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: insert_num (type: int), c1 (type: varchar(50)), c2 (type: varchar(50)), c3 (type: varchar(50)), b (type: string)
               outputColumnNames: _col0, _col1, _col2, _col3, _col4
-              Statistics: Num rows: 10 Data size: 368 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 4 Data size: 203 Basic stats: COMPLETE Column stats: NONE
               Reduce Output Operator
                 key expressions: _col0 (type: int)
                 sort order: +
-                Statistics: Num rows: 10 Data size: 368 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 4 Data size: 203 Basic stats: COMPLETE Column stats: NONE
                 value expressions: _col1 (type: varchar(50)), _col2 (type: varchar(50)), _col3 (type: varchar(50)), _col4 (type: string)
       Execution mode: vectorized
       Reduce Operator Tree:
         Select Operator
           expressions: KEY.reducesinkkey0 (type: int), VALUE._col0 (type: varchar(50)), VALUE._col1 (type: varchar(50)), VALUE._col2 (type: varchar(50)), VALUE._col3 (type: string)
           outputColumnNames: _col0, _col1, _col2, _col3, _col4
-          Statistics: Num rows: 10 Data size: 368 Basic stats: COMPLETE Column stats: NONE
+          Statistics: Num rows: 4 Data size: 203 Basic stats: COMPLETE Column stats: NONE
           File Output Operator
             compressed: false
-            Statistics: Num rows: 10 Data size: 368 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 4 Data size: 203 Basic stats: COMPLETE Column stats: NONE
             table:
                 input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                 output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@@ -2707,25 +2717,25 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: table_change_numeric_group_string_group_floating_varchar_trunc
-            Statistics: Num rows: 10 Data size: 362 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 4 Data size: 220 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: insert_num (type: int), c1 (type: varchar(7)), c2 (type: varchar(7)), c3 (type: varchar(7)), b (type: string)
               outputColumnNames: _col0, _col1, _col2, _col3, _col4
-              Statistics: Num rows: 10 Data size: 362 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 4 Data size: 220 Basic stats: COMPLETE Column stats: NONE
               Reduce Output Operator
                 key expressions: _col0 (type: int)
                 sort order: +
-                Statistics: Num rows: 10 Data size: 362 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 4 Data size: 220 Basic stats: COMPLETE Column stats: NONE
                 value expressions: _col1 (type: varchar(7)), _col2 (type: varchar(7)), _col3 (type: varchar(7)), _col4 (type: string)
       Execution mode: vectorized
       Reduce Operator Tree:
         Select Operator
           expressions: KEY.reducesinkkey0 (type: int), VALUE._col0 (type: varchar(7)), VALUE._col1 (type: varchar(7)), VALUE._col2 (type: varchar(7)), VALUE._col3 (type: string)
           outputColumnNames: _col0, _col1, _col2, _col3, _col4
-          Statistics: Num rows: 10 Data size: 362 Basic stats: COMPLETE Column stats: NONE
+          Statistics: Num rows: 4 Data size: 220 Basic stats: COMPLETE Column stats: NONE
           File Output Operator
             compressed: false
-            Statistics: Num rows: 10 Data size: 362 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 4 Data size: 220 Basic stats: COMPLETE Column stats: NONE
             table:
                 input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                 output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@@ -2876,25 +2886,25 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: table_change_string_group_string_group_string
-            Statistics: Num rows: 10 Data size: 548 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 4 Data size: 236 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: insert_num (type: int), c1 (type: char(50)), c2 (type: char(9)), c3 (type: varchar(50)), c4 (type: char(9)), b (type: string)
               outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
-              Statistics: Num rows: 10 Data size: 548 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 4 Data size: 236 Basic stats: COMPLETE Column stats: NONE
               Reduce Output Operator
                 key expressions: _col0 (type: int)
                 sort order: +
-                Statistics: Num rows: 10 Data size: 548 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 4 Data size: 236 Basic stats: COMPLETE Column stats: NONE
                 value expressions: _col1 (type: char(50)), _col2 (type: char(9)), _col3 (type: varchar(50)), _col4 (type: char(9)), _col5 (type: string)
       Execution mode: vectorized
       Reduce Operator Tree:
         Select Operator
           expressions: KEY.reducesinkkey0 (type: int), VALUE._col0 (type: char(50)), VALUE._col1 (type: char(9)), VALUE._col2 (type: varchar(50)), VALUE._col3 (type: char(9)), VALUE._col4 (type: string)
           outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
-          Statistics: Num rows: 10 Data size: 548 Basic stats: COMPLETE Column stats: NONE
+          Statistics: Num rows: 4 Data size: 236 Basic stats: COMPLETE Column stats: NONE
           File Output Operator
             compressed: false
-            Statistics: Num rows: 10 Data size: 548 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 4 Data size: 236 Basic stats: COMPLETE Column stats: NONE
             table:
                 input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                 output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@@ -3035,25 +3045,25 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: table_change_string_group_string_group_char
-            Statistics: Num rows: 10 Data size: 463 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 4 Data size: 178 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: insert_num (type: int), c1 (type: varchar(50)), c2 (type: varchar(9)), c3 (type: string), b (type: string)
               outputColumnNames: _col0, _col1, _col2, _col3, _col4
-              Statistics: Num rows: 10 Data size: 463 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 4 Data size: 178 Basic stats: COMPLETE Column stats: NONE
               Reduce Output Operator
                 key expressions: _col0 (type: int)
                 sort order: +
-                Statistics: Num rows: 10 Data size: 463 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 4 Data size: 178 Basic stats: COMPLETE Column stats: NONE
                 value expressions: _col1 (type: varchar(50)), _col2 (type: varchar(9)), _col3 (type: string), _col4 (type: string)
       Execution mode: vectorized
       Reduce Operator Tree:
         Select Operator
           expressions: KEY.reducesinkkey0 (type: int), VALUE._col0 (type: varchar(50)), VALUE._col1 (type: varchar(9)), VALUE._col2 (type: string), VALUE._col3 (type: string)
           outputColumnNames: _col0, _col1, _col2, _col3, _col4
-          Statistics: Num rows: 10 Data size: 463 Basic stats: COMPLETE Column stats: NONE
+          Statistics: Num rows: 4 Data size: 178 Basic stats: COMPLETE Column stats: NONE
           File Output Operator
             compressed: false
-            Statistics: Num rows: 10 Data size: 463 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 4 Data size: 178 Basic stats: COMPLETE Column stats: NONE
             table:
                 input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                 output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@@ -3194,25 +3204,25 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: table_change_string_group_string_group_varchar
-            Statistics: Num rows: 10 Data size: 452 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 4 Data size: 187 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: insert_num (type: int), c1 (type: char(50)), c2 (type: char(9)), c3 (type: string), b (type: string)
               outputColumnNames: _col0, _col1, _col2, _col3, _col4
-              Statistics: Num rows: 10 Data size: 452 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 4 Data size: 187 Basic stats: COMPLETE Column stats: NONE
               Reduce Output Operator
                 key expressions: _col0 (type: int)
                 sort order: +
-                Statistics: Num rows: 10 Data size: 452 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 4 Data size: 187 Basic stats: COMPLETE Column stats: NONE
                 value expressions: _col1 (type: char(50)), _col2 (type: char(9)), _col3 (type: string), _col4 (type: string)
       Execution mode: vectorized
       Reduce Operator Tree:
         Select Operator
           expressions: KEY.reducesinkkey0 (type: int), VALUE._col0 (type: char(50)), VALUE._col1 (type: char(9)), VALUE._col2 (type: string), VALUE._col3 (type: string)
           outputColumnNames: _col0, _col1, _col2, _col3, _col4
-          Statistics: Num rows: 10 Data size: 452 Basic stats: COMPLETE Column stats: NONE
+          Statistics: Num rows: 4 Data size: 187 Basic stats: COMPLETE Column stats: NONE
           File Output Operator
             compressed: false
-            Statistics: Num rows: 10 Data size: 452 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 4 Data size: 187 Basic stats: COMPLETE Column stats: NONE
             table:
                 input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                 output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@@ -3367,25 +3377,25 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: table_change_lower_to_higher_numeric_group_tinyint
-            Statistics: Num rows: 10 Data size: 446 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 4 Data size: 118 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: insert_num (type: int), c1 (type: smallint), c2 (type: int), c3 (type: bigint), c4 (type: decimal(38,18)), c5 (type: float), c6 (type: double), b (type: string)
               outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7
-              Statistics: Num rows: 10 Data size: 446 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 4 Data size: 118 Basic stats: COMPLETE Column stats: NONE
               Reduce Output Operator
                 key expressions: _col0 (type: int)
                 sort order: +
-                Statistics: Num rows: 10 Data size: 446 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 4 Data size: 118 Basic stats: COMPLETE Column stats: NONE
                 value expressions: _col1 (type: smallint), _col2 (type: int), _col3 (type: bigint), _col4 (type: decimal(38,18)), _col5 (type: float), _col6 (type: double), _col7 (type: string)
       Execution mode: vectorized
       Reduce Operator Tree:
         Select Operator
           expressions: KEY.reducesinkkey0 (type: int), VALUE._col0 (type: smallint), VALUE._col1 (type: int), VALUE._col2 (type: bigint), VALUE._col3 (type: decimal(38,18)), VALUE._col4 (type: float), VALUE._col5 (type: double), VALUE._col6 (type: string)
           outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7
-          Statistics: Num rows: 10 Data size: 446 Basic stats: COMPLETE Column stats: NONE
+          Statistics: Num rows: 4 Data size: 118 Basic stats: COMPLETE Column stats: NONE
           File Output Operator
             compressed: false
-            Statistics: Num rows: 10 Data size: 446 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 4 Data size: 118 Basic stats: COMPLETE Column stats: NONE
             table:
                 input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                 output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@@ -3530,25 +3540,25 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: table_change_lower_to_higher_numeric_group_smallint
-            Statistics: Num rows: 10 Data size: 445 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 4 Data size: 130 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: insert_num (type: int), c1 (type: int), c2 (type: bigint), c3 (type: decimal(38,18)), c4 (type: float), c5 (type: double), b (type: string)
               outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6
-              Statistics: Num rows: 10 Data size: 445 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 4 Data size: 130 Basic stats: COMPLETE Column stats: NONE
               Reduce Output Operator
                 key expressions: _col0 (type: int)
                 sort order: +
-                Statistics: Num rows: 10 Data size: 445 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 4 Data size: 130 Basic stats: COMPLETE Column stats: NONE
                 value expressions: _col1 (type: int), _col2 (type: bigint), _col3 (type: decimal(38,18)), _col4 (type: float), _col5 (type: double), _col6 (type: string)
       Execution mode: vectorized
       Reduce Operator Tree:
         Select Operator
           expressions: KEY.reducesinkkey0 (type: int), VALUE._col0 (type: int), VALUE._col1 (type: bigint), VALUE._col2 (type: decimal(38,18)), VALUE._col3 (type: float), VALUE._col4 (type: double), VALUE._col5 (type: string)
           outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6
-          Statistics: Num rows: 10 Data size: 445 Basic stats: COMPLETE Column stats: NONE
+          Statistics: Num rows: 4 Data size: 130 Basic stats: COMPLETE Column stats: NONE
           File Output Operator
             compressed: false
-            Statistics: Num rows: 10 Data size: 445 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 4 Data size: 130 Basic stats: COMPLETE Column stats: NONE
             table:
                 input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                 output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@@ -3691,25 +3701,25 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: table_change_lower_to_higher_numeric_group_int
-            Statistics: Num rows: 10 Data size: 429 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 4 Data size: 132 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: insert_num (type: int), c1 (type: bigint), c2 (type: decimal(38,18)), c3 (type: float), c4 (type: double), b (type: string)
               outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
-              Statistics: Num rows: 10 Data size: 429 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 4 Data size: 132 Basic stats: COMPLETE Column stats: NONE
               Reduce Output Operator
                 key expressions: _col0 (type: int)
                 sort order: +
-                Statistics: Num rows: 10 Data size: 429 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 4 Data size: 132 Basic stats: COMPLETE Column stats: NONE
                 value expressions: _col1 (type: bigint), _col2 (type: decimal(38,18)), _col3 (type: float), _col4 (type: double), _col5 (type: string)
       Execution mode: vectorized
       Reduce Operator Tree:
         Select Operator
           expressions: KEY.reducesinkkey0 (type: int), VALUE._col0 (type: bigint), VALUE._col1 (type: decimal(38,18)), VALUE._col2 (type: float), VALUE._col3 (type: double), VALUE._col4 (type: string)
           outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
-          Statistics: Num rows: 10 Data size: 429 Basic stats: COMPLETE Column stats: NONE
+          Statistics: Num rows: 4 Data size: 132 Basic stats: COMPLETE Column stats: NONE
           File Output Operator
             compressed: false
-            Statistics: Num rows: 10 Data size: 429 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 4 Data size: 132 Basic stats: COMPLETE Column stats: NONE
             table:
                 input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                 output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@@ -3850,25 +3860,25 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: table_change_lower_to_higher_numeric_group_bigint
-            Statistics: Num rows: 10 Data size: 411 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 4 Data size: 127 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: insert_num (type: int), c1 (type: decimal(38,18)), c2 (type: float), c3 (type: double), b (type: string)
               outputColumnNames: _col0, _col1, _col2, _col3, _col4
-              Statistics: Num rows: 10 Data size: 411 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 4 Data size: 127 Basic stats: COMPLETE Column stats: NONE
               Reduce Output Operator
                 key expressions: _col0 (type: int)
                 sort order: +
-                Statistics: Num rows: 10 Data size: 411 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 4 Data size: 127 Basic stats: COMPLETE Column stats: NONE
                 value expressions: _col1 (type: decimal(38,18)), _col2 (type: float), _col3 (type: double), _col4 (type: string)
       Execution mode: vectorized
       Reduce Operator Tree:
         Select Operator
           expressions: KEY.reducesinkkey0 (type: int), VALUE._col0 (type: decimal(38,18)), VALUE._col1 (type: float), VALUE._col2 (type: double), VALUE._col3 (type: string)
           outputColumnNames: _col0, _col1, _col2, _col3, _col4
-          Statistics: Num rows: 10 Data size: 411 Basic stats: COMPLETE Column stats: NONE
+          Statistics: Num rows: 4 Data size: 127 Basic stats: COMPLETE Column stats: NONE
           File Output Operator
             compressed: false
-            Statistics: Num rows: 10 Data size: 411 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 4 Data size: 127 Basic stats: COMPLETE Column stats: NONE
             table:
                 input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                 output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@@ -4007,25 +4017,25 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: table_change_lower_to_higher_numeric_group_decimal
-            Statistics: Num rows: 10 Data size: 335 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 4 Data size: 238 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: insert_num (type: int), c1 (type: float), c2 (type: double), b (type: string)
               outputColumnNames: _col0, _col1, _col2, _col3
-              Statistics: Num rows: 10 Data size: 335 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 4 Data size: 238 Basic stats: COMPLETE Column stats: NONE
               Reduce Output Operator
                 key expressions: _col0 (type: int)
                 sort order: +
-                Statistics: Num rows: 10 Data size: 335 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 4 Data size: 238 Basic stats: COMPLETE Column stats: NONE
                 value expressions: _col1 (type: float), _col2 (type: double), _col3 (type: string)
       Execution mode: vectorized
       Reduce Operator Tree:
         Select Operator
           expressions: KEY.reducesinkkey0 (type: int), VALUE._col0 (type: float), VALUE._col1 (type: double), VALUE._col2 (type: string)
           outputColumnNames: _col0, _col1, _col2, _col3
-          Statistics: Num rows: 10 Data size: 335 Basic stats: COMPLETE Column stats: NONE
+          Statistics: Num rows: 4 Data size: 238 Basic stats: COMPLETE Column stats: NONE
           File Output Operator
             compressed: false
-            Statistics: Num rows: 10 Data size: 335 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 4 Data size: 238 Basic stats: COMPLETE Column stats: NONE
             table:
                 input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                 output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@@ -4162,25 +4172,25 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: table_change_lower_to_higher_numeric_group_float
-            Statistics: Num rows: 10 Data size: 148 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 4 Data size: 78 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: insert_num (type: int), c1 (type: double), b (type: string)
               outputColumnNames: _col0, _col1, _col2
-              Statistics: Num rows: 10 Data size: 148 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 4 Data size: 78 Basic stats: COMPLETE Column stats: NONE
               Reduce Output Operator
                 key expressions: _col0 (type: int)
                 sort order: +
-                Statistics: Num rows: 10 Data size: 148 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 4 Data size: 78 Basic stats: COMPLETE Column stats: NONE
                 value expressions: _col1 (type: double), _col2 (type: string)
       Execution mode: vectorized
       Reduce Operator Tree:
         Select Operator
           expressions: KEY.reducesinkkey0 (type: int), VALUE._col0 (type: double), VALUE._col1 (type: string)
           outputColumnNames: _col0, _col1, _col2
-          Statistics: Num rows: 10 Data size: 148 Basic stats: COMPLETE Column stats: NONE
+          Statistics: Num rows: 4 Data size: 78 Basic stats: COMPLETE Column stats: NONE
           File Output Operator
             compressed: false
-            Statistics: Num rows: 10 Data size: 148 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 4 Data size: 78 Basic stats: COMPLETE Column stats: NONE
             table:
                 input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                 output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
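
The plans above come from EXPLAINs in the schema-evolution qfile named in the diff header. A hedged sketch of the statement pattern behind the first plan (the real DDL lives in schema_evol_text_vecrow_mapwork_table.q; the ADD COLUMNS list is a guess for illustration):

    CREATE TABLE table_add_int_permute_select (insert_num INT, a INT, b STRING);
    ALTER TABLE table_add_int_permute_select ADD COLUMNS (c INT);
    -- EXPLAIN of the sorted SELECT yields the vectorized map/reduce plan shown
    -- above, with a Statistics line (Num rows / Data size) at each operator.
    EXPLAIN SELECT insert_num, a, b FROM table_add_int_permute_select ORDER BY insert_num;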

http://git-wip-us.apache.org/repos/asf/hive/blob/244ce09c/ql/src/test/results/clientpositive/show_create_table_alter.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/show_create_table_alter.q.out b/ql/src/test/results/clientpositive/show_create_table_alter.q.out
index 32819ea..d09f30b 100644
--- a/ql/src/test/results/clientpositive/show_create_table_alter.q.out
+++ b/ql/src/test/results/clientpositive/show_create_table_alter.q.out
@@ -35,6 +35,11 @@ OUTPUTFORMAT
 LOCATION
 #### A masked pattern was here ####
 TBLPROPERTIES (
+  'COLUMN_STATS_ACCURATE'='{\"BASIC_STATS\":\"true\"}', 
+  'numFiles'='0', 
+  'numRows'='0', 
+  'rawDataSize'='0', 
+  'totalSize'='0', 
 #### A masked pattern was here ####
 PREHOOK: query: -- Add a comment to the table, change the EXTERNAL property, and test SHOW CREATE TABLE on the change.
 ALTER TABLE tmp_showcrt1 SET TBLPROPERTIES ('comment'='temporary table', 'EXTERNAL'='FALSE')
@@ -73,6 +78,8 @@ TBLPROPERTIES (
   'EXTERNAL'='FALSE', 
 #### A masked pattern was here ####
   'numFiles'='0', 
+  'numRows'='0', 
+  'rawDataSize'='0', 
   'totalSize'='0', 
 #### A masked pattern was here ####
 PREHOOK: query: -- Alter the table comment, change the EXTERNAL property back and test SHOW CREATE TABLE on the change.
@@ -111,6 +118,8 @@ LOCATION
 TBLPROPERTIES (
 #### A masked pattern was here ####
   'numFiles'='0', 
+  'numRows'='0', 
+  'rawDataSize'='0', 
   'totalSize'='0', 
 #### A masked pattern was here ####
 PREHOOK: query: -- Change the 'SORTBUCKETCOLSPREFIX' property and test SHOW CREATE TABLE. The output should not change.
@@ -149,6 +158,8 @@ LOCATION
 TBLPROPERTIES (
 #### A masked pattern was here ####
   'numFiles'='0', 
+  'numRows'='0', 
+  'rawDataSize'='0', 
   'totalSize'='0', 
 #### A masked pattern was here ####
 PREHOOK: query: -- Alter the storage handler of the table, and test SHOW CREATE TABLE.
@@ -187,6 +198,8 @@ LOCATION
 TBLPROPERTIES (
 #### A masked pattern was here ####
   'numFiles'='0', 
+  'numRows'='0', 
+  'rawDataSize'='0', 
   'totalSize'='0', 
 #### A masked pattern was here ####
 PREHOOK: query: DROP TABLE tmp_showcrt1

http://git-wip-us.apache.org/repos/asf/hive/blob/244ce09c/ql/src/test/results/clientpositive/show_create_table_db_table.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/show_create_table_db_table.q.out b/ql/src/test/results/clientpositive/show_create_table_db_table.q.out
index 495f4b5..daf63e9 100644
--- a/ql/src/test/results/clientpositive/show_create_table_db_table.q.out
+++ b/ql/src/test/results/clientpositive/show_create_table_db_table.q.out
@@ -46,6 +46,11 @@ OUTPUTFORMAT
 LOCATION
 #### A masked pattern was here ####
 TBLPROPERTIES (
+  'COLUMN_STATS_ACCURATE'='{\"BASIC_STATS\":\"true\"}', 
+  'numFiles'='0', 
+  'numRows'='0', 
+  'rawDataSize'='0', 
+  'totalSize'='0', 
 #### A masked pattern was here ####
 PREHOOK: query: DROP TABLE tmp_feng.tmp_showcrt
 PREHOOK: type: DROPTABLE

http://git-wip-us.apache.org/repos/asf/hive/blob/244ce09c/ql/src/test/results/clientpositive/show_create_table_serde.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/show_create_table_serde.q.out b/ql/src/test/results/clientpositive/show_create_table_serde.q.out
index 2350d98..a7bcb44 100644
--- a/ql/src/test/results/clientpositive/show_create_table_serde.q.out
+++ b/ql/src/test/results/clientpositive/show_create_table_serde.q.out
@@ -42,6 +42,8 @@ LOCATION
 TBLPROPERTIES (
 #### A masked pattern was here ####
   'numFiles'='0', 
+  'numRows'='0', 
+  'rawDataSize'='0', 
   'totalSize'='0', 
 #### A masked pattern was here ####
 PREHOOK: query: DROP TABLE tmp_showcrt1
@@ -90,6 +92,11 @@ OUTPUTFORMAT
 LOCATION
 #### A masked pattern was here ####
 TBLPROPERTIES (
+  'COLUMN_STATS_ACCURATE'='{\"BASIC_STATS\":\"true\"}', 
+  'numFiles'='0', 
+  'numRows'='0', 
+  'rawDataSize'='0', 
+  'totalSize'='0', 
 #### A masked pattern was here ####
 PREHOOK: query: DROP TABLE tmp_showcrt1
 PREHOOK: type: DROPTABLE
@@ -139,6 +146,11 @@ OUTPUTFORMAT
 LOCATION
 #### A masked pattern was here ####
 TBLPROPERTIES (
+  'COLUMN_STATS_ACCURATE'='{\"BASIC_STATS\":\"true\"}', 
+  'numFiles'='0', 
+  'numRows'='0', 
+  'rawDataSize'='0', 
+  'totalSize'='0', 
 #### A masked pattern was here ####
 PREHOOK: query: DROP TABLE tmp_showcrt1
 PREHOOK: type: DROPTABLE
@@ -183,6 +195,11 @@ WITH SERDEPROPERTIES (
 LOCATION
 #### A masked pattern was here ####
 TBLPROPERTIES (
+  'COLUMN_STATS_ACCURATE'='{\"BASIC_STATS\":\"true\"}', 
+  'numFiles'='0', 
+  'numRows'='0', 
+  'rawDataSize'='0', 
+  'totalSize'='0', 
 #### A masked pattern was here ####
 PREHOOK: query: DROP TABLE tmp_showcrt1
 PREHOOK: type: DROPTABLE

http://git-wip-us.apache.org/repos/asf/hive/blob/244ce09c/ql/src/test/results/clientpositive/show_tblproperties.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/show_tblproperties.q.out b/ql/src/test/results/clientpositive/show_tblproperties.q.out
index 63bbe6d..e1c6670 100644
--- a/ql/src/test/results/clientpositive/show_tblproperties.q.out
+++ b/ql/src/test/results/clientpositive/show_tblproperties.q.out
@@ -39,6 +39,8 @@ POSTHOOK: type: SHOW_TBLPROPERTIES
 bar	bar value
 #### A masked pattern was here ####
 numFiles	0
+numRows	0
+rawDataSize	0
 tmp	true
 totalSize	0
 #### A masked pattern was here ####
@@ -54,6 +56,8 @@ POSTHOOK: type: SHOW_TBLPROPERTIES
 bar	bar value
 #### A masked pattern was here ####
 numFiles	0
+numRows	0
+rawDataSize	0
 tmp	true
 totalSize	0
 #### A masked pattern was here ####
@@ -107,6 +111,8 @@ POSTHOOK: type: SHOW_TBLPROPERTIES
 bar	bar value
 #### A masked pattern was here ####
 numFiles	0
+numRows	0
+rawDataSize	0
 tmp	true
 totalSize	0
 #### A masked pattern was here ####
@@ -124,6 +130,8 @@ POSTHOOK: type: SHOW_TBLPROPERTIES
 bar	bar value1
 #### A masked pattern was here ####
 numFiles	0
+numRows	0
+rawDataSize	0
 tmp	true1
 totalSize	0
 #### A masked pattern was here ####
@@ -147,6 +155,8 @@ POSTHOOK: type: SHOW_TBLPROPERTIES
 bar	bar value1
 #### A masked pattern was here ####
 numFiles	0
+numRows	0
+rawDataSize	0
 tmp	true1
 totalSize	0
 #### A masked pattern was here ####

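The same two counters also show up in plain SHOW TBLPROPERTIES output, and the later hunks suggest they persist across ALTER TABLE ... SET TBLPROPERTIES (they appear alongside the updated bar/tmp values). A hedged sketch; the table name below is hypothetical, since the .q file's name is not visible in this diff:

    SHOW TBLPROPERTIES tmp_tbl;               -- hypothetical table name
    SHOW TBLPROPERTIES tmp_tbl('numRows');    -- look up a single property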
http://git-wip-us.apache.org/repos/asf/hive/blob/244ce09c/ql/src/test/results/clientpositive/spark/auto_sortmerge_join_1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/auto_sortmerge_join_1.q.out b/ql/src/test/results/clientpositive/spark/auto_sortmerge_join_1.q.out
index dfb571d..e2be217 100644
--- a/ql/src/test/results/clientpositive/spark/auto_sortmerge_join_1.q.out
+++ b/ql/src/test/results/clientpositive/spark/auto_sortmerge_join_1.q.out
@@ -174,8 +174,10 @@ STAGE PLANS:
 #### A masked pattern was here ####
                     name default.bucket_big
                     numFiles 4
+                    numRows 0
                     partition_columns ds
                     partition_columns.types string
+                    rawDataSize 0
                     serialization.ddl struct bucket_big { string key, string value}
                     serialization.format 1
                     serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -219,8 +221,10 @@ STAGE PLANS:
 #### A masked pattern was here ####
                     name default.bucket_big
                     numFiles 4
+                    numRows 0
                     partition_columns ds
                     partition_columns.types string
+                    rawDataSize 0
                     serialization.ddl struct bucket_big { string key, string value}
                     serialization.format 1
                     serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -374,8 +378,10 @@ STAGE PLANS:
 #### A masked pattern was here ####
                     name default.bucket_big
                     numFiles 4
+                    numRows 0
                     partition_columns ds
                     partition_columns.types string
+                    rawDataSize 0
                     serialization.ddl struct bucket_big { string key, string value}
                     serialization.format 1
                     serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -419,8 +425,10 @@ STAGE PLANS:
 #### A masked pattern was here ####
                     name default.bucket_big
                     numFiles 4
+                    numRows 0
                     partition_columns ds
                     partition_columns.types string
+                    rawDataSize 0
                     serialization.ddl struct bucket_big { string key, string value}
                     serialization.format 1
                     serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -564,8 +572,10 @@ STAGE PLANS:
 #### A masked pattern was here ####
                     name default.bucket_small
                     numFiles 2
+                    numRows 0
                     partition_columns ds
                     partition_columns.types string
+                    rawDataSize 0
                     serialization.ddl struct bucket_small { string key, string value}
                     serialization.format 1
                     serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -665,8 +675,10 @@ STAGE PLANS:
 #### A masked pattern was here ####
                     name default.bucket_big
                     numFiles 4
+                    numRows 0
                     partition_columns ds
                     partition_columns.types string
+                    rawDataSize 0
                     serialization.ddl struct bucket_big { string key, string value}
                     serialization.format 1
                     serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -710,8 +722,10 @@ STAGE PLANS:
 #### A masked pattern was here ####
                     name default.bucket_big
                     numFiles 4
+                    numRows 0
                     partition_columns ds
                     partition_columns.types string
+                    rawDataSize 0
                     serialization.ddl struct bucket_big { string key, string value}
                     serialization.format 1
                     serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe

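In the spark auto_sortmerge_join outputs, the new numRows 0 / rawDataSize 0 entries land in the per-partition parameter dumps inside STAGE PLANS: the stats state is now recorded when each partition is loaded, and a plain LOAD cannot know the row count, so both counters stay at 0 while numFiles and totalSize reflect the copied files. A sketch of the setup these tests use, with an illustrative path and partition value (the real .q files load several presplit bucket files per partition):

    CREATE TABLE bucket_big (key string, value string)
    PARTITIONED BY (ds string)
    CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS
    STORED AS TEXTFILE;
    -- depending on the Hive version, loading into a bucketed table may
    -- require relaxing strict bucketing checks first
    LOAD DATA LOCAL INPATH 'srcbucket0.txt'             -- illustrative path
    INTO TABLE bucket_big PARTITION (ds='2008-04-08');  -- illustrative value

The remaining auto_sortmerge_join files below show the same pattern for each bucket_big/bucket_medium/bucket_small partition (and, in auto_sortmerge_join_5, for non-partitioned tables, where the two counters appear without partition_columns entries).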
http://git-wip-us.apache.org/repos/asf/hive/blob/244ce09c/ql/src/test/results/clientpositive/spark/auto_sortmerge_join_12.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/auto_sortmerge_join_12.q.out b/ql/src/test/results/clientpositive/spark/auto_sortmerge_join_12.q.out
index 76ea0a8..eff3671 100644
--- a/ql/src/test/results/clientpositive/spark/auto_sortmerge_join_12.q.out
+++ b/ql/src/test/results/clientpositive/spark/auto_sortmerge_join_12.q.out
@@ -194,8 +194,10 @@ STAGE PLANS:
 #### A masked pattern was here ####
                     name default.bucket_small
                     numFiles 2
+                    numRows 0
                     partition_columns ds
                     partition_columns.types string
+                    rawDataSize 0
                     serialization.ddl struct bucket_small { string key, string value}
                     serialization.format 1
                     serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -266,8 +268,10 @@ STAGE PLANS:
 #### A masked pattern was here ####
                     name default.bucket_medium
                     numFiles 3
+                    numRows 0
                     partition_columns ds
                     partition_columns.types string
+                    rawDataSize 0
                     serialization.ddl struct bucket_medium { string key, string value}
                     serialization.format 1
                     serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -331,8 +335,10 @@ STAGE PLANS:
 #### A masked pattern was here ####
                     name default.bucket_medium
                     numFiles 3
+                    numRows 0
                     partition_columns ds
                     partition_columns.types string
+                    rawDataSize 0
                     serialization.ddl struct bucket_medium { string key, string value}
                     serialization.format 1
                     serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -439,8 +445,10 @@ STAGE PLANS:
 #### A masked pattern was here ####
                     name default.bucket_big
                     numFiles 4
+                    numRows 0
                     partition_columns ds
                     partition_columns.types string
+                    rawDataSize 0
                     serialization.ddl struct bucket_big { string key, string value}
                     serialization.format 1
                     serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -484,8 +492,10 @@ STAGE PLANS:
 #### A masked pattern was here ####
                     name default.bucket_big
                     numFiles 4
+                    numRows 0
                     partition_columns ds
                     partition_columns.types string
+                    rawDataSize 0
                     serialization.ddl struct bucket_big { string key, string value}
                     serialization.format 1
                     serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe

http://git-wip-us.apache.org/repos/asf/hive/blob/244ce09c/ql/src/test/results/clientpositive/spark/auto_sortmerge_join_3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/auto_sortmerge_join_3.q.out b/ql/src/test/results/clientpositive/spark/auto_sortmerge_join_3.q.out
index c942d11..e09df8c 100644
--- a/ql/src/test/results/clientpositive/spark/auto_sortmerge_join_3.q.out
+++ b/ql/src/test/results/clientpositive/spark/auto_sortmerge_join_3.q.out
@@ -154,8 +154,10 @@ STAGE PLANS:
 #### A masked pattern was here ####
                     name default.bucket_big
                     numFiles 4
+                    numRows 0
                     partition_columns ds
                     partition_columns.types string
+                    rawDataSize 0
                     serialization.ddl struct bucket_big { string key, string value}
                     serialization.format 1
                     serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -308,8 +310,10 @@ STAGE PLANS:
 #### A masked pattern was here ####
                     name default.bucket_big
                     numFiles 4
+                    numRows 0
                     partition_columns ds
                     partition_columns.types string
+                    rawDataSize 0
                     serialization.ddl struct bucket_big { string key, string value}
                     serialization.format 1
                     serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -452,8 +456,10 @@ STAGE PLANS:
 #### A masked pattern was here ####
                     name default.bucket_small
                     numFiles 2
+                    numRows 0
                     partition_columns ds
                     partition_columns.types string
+                    rawDataSize 0
                     serialization.ddl struct bucket_small { string key, string value}
                     serialization.format 1
                     serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -497,8 +503,10 @@ STAGE PLANS:
 #### A masked pattern was here ####
                     name default.bucket_small
                     numFiles 2
+                    numRows 0
                     partition_columns ds
                     partition_columns.types string
+                    rawDataSize 0
                     serialization.ddl struct bucket_small { string key, string value}
                     serialization.format 1
                     serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -599,8 +607,10 @@ STAGE PLANS:
 #### A masked pattern was here ####
                     name default.bucket_big
                     numFiles 4
+                    numRows 0
                     partition_columns ds
                     partition_columns.types string
+                    rawDataSize 0
                     serialization.ddl struct bucket_big { string key, string value}
                     serialization.format 1
                     serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe

http://git-wip-us.apache.org/repos/asf/hive/blob/244ce09c/ql/src/test/results/clientpositive/spark/auto_sortmerge_join_4.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/auto_sortmerge_join_4.q.out b/ql/src/test/results/clientpositive/spark/auto_sortmerge_join_4.q.out
index ba2547a..a59c8a4 100644
--- a/ql/src/test/results/clientpositive/spark/auto_sortmerge_join_4.q.out
+++ b/ql/src/test/results/clientpositive/spark/auto_sortmerge_join_4.q.out
@@ -170,8 +170,10 @@ STAGE PLANS:
 #### A masked pattern was here ####
                     name default.bucket_big
                     numFiles 2
+                    numRows 0
                     partition_columns ds
                     partition_columns.types string
+                    rawDataSize 0
                     serialization.ddl struct bucket_big { string key, string value}
                     serialization.format 1
                     serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -324,8 +326,10 @@ STAGE PLANS:
 #### A masked pattern was here ####
                     name default.bucket_big
                     numFiles 2
+                    numRows 0
                     partition_columns ds
                     partition_columns.types string
+                    rawDataSize 0
                     serialization.ddl struct bucket_big { string key, string value}
                     serialization.format 1
                     serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -468,8 +472,10 @@ STAGE PLANS:
 #### A masked pattern was here ####
                     name default.bucket_small
                     numFiles 4
+                    numRows 0
                     partition_columns ds
                     partition_columns.types string
+                    rawDataSize 0
                     serialization.ddl struct bucket_small { string key, string value}
                     serialization.format 1
                     serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -513,8 +519,10 @@ STAGE PLANS:
 #### A masked pattern was here ####
                     name default.bucket_small
                     numFiles 4
+                    numRows 0
                     partition_columns ds
                     partition_columns.types string
+                    rawDataSize 0
                     serialization.ddl struct bucket_small { string key, string value}
                     serialization.format 1
                     serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -615,8 +623,10 @@ STAGE PLANS:
 #### A masked pattern was here ####
                     name default.bucket_big
                     numFiles 2
+                    numRows 0
                     partition_columns ds
                     partition_columns.types string
+                    rawDataSize 0
                     serialization.ddl struct bucket_big { string key, string value}
                     serialization.format 1
                     serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe

http://git-wip-us.apache.org/repos/asf/hive/blob/244ce09c/ql/src/test/results/clientpositive/spark/auto_sortmerge_join_5.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/auto_sortmerge_join_5.q.out b/ql/src/test/results/clientpositive/spark/auto_sortmerge_join_5.q.out
index 99fca25..04e5f40 100644
--- a/ql/src/test/results/clientpositive/spark/auto_sortmerge_join_5.q.out
+++ b/ql/src/test/results/clientpositive/spark/auto_sortmerge_join_5.q.out
@@ -140,6 +140,8 @@ STAGE PLANS:
 #### A masked pattern was here ####
                     name default.bucket_big
                     numFiles 2
+                    numRows 0
+                    rawDataSize 0
                     serialization.ddl struct bucket_big { string key, string value}
                     serialization.format 1
                     serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -159,6 +161,8 @@ STAGE PLANS:
 #### A masked pattern was here ####
                       name default.bucket_big
                       numFiles 2
+                      numRows 0
+                      rawDataSize 0
                       serialization.ddl struct bucket_big { string key, string value}
                       serialization.format 1
                       serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -285,6 +289,8 @@ STAGE PLANS:
 #### A masked pattern was here ####
                     name default.bucket_big
                     numFiles 2
+                    numRows 0
+                    rawDataSize 0
                     serialization.ddl struct bucket_big { string key, string value}
                     serialization.format 1
                     serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -304,6 +310,8 @@ STAGE PLANS:
 #### A masked pattern was here ####
                       name default.bucket_big
                       numFiles 2
+                      numRows 0
+                      rawDataSize 0
                       serialization.ddl struct bucket_big { string key, string value}
                       serialization.format 1
                       serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -420,6 +428,8 @@ STAGE PLANS:
 #### A masked pattern was here ####
                     name default.bucket_small
                     numFiles 4
+                    numRows 0
+                    rawDataSize 0
                     serialization.ddl struct bucket_small { string key, string value}
                     serialization.format 1
                     serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -439,6 +449,8 @@ STAGE PLANS:
 #### A masked pattern was here ####
                       name default.bucket_small
                       numFiles 4
+                      numRows 0
+                      rawDataSize 0
                       serialization.ddl struct bucket_small { string key, string value}
                       serialization.format 1
                       serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -518,6 +530,8 @@ STAGE PLANS:
 #### A masked pattern was here ####
                     name default.bucket_big
                     numFiles 2
+                    numRows 0
+                    rawDataSize 0
                     serialization.ddl struct bucket_big { string key, string value}
                     serialization.format 1
                     serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -537,6 +551,8 @@ STAGE PLANS:
 #### A masked pattern was here ####
                       name default.bucket_big
                       numFiles 2
+                      numRows 0
+                      rawDataSize 0
                       serialization.ddl struct bucket_big { string key, string value}
                       serialization.format 1
                       serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe

http://git-wip-us.apache.org/repos/asf/hive/blob/244ce09c/ql/src/test/results/clientpositive/spark/auto_sortmerge_join_7.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/auto_sortmerge_join_7.q.out b/ql/src/test/results/clientpositive/spark/auto_sortmerge_join_7.q.out
index 00c601f..6f8307b 100644
--- a/ql/src/test/results/clientpositive/spark/auto_sortmerge_join_7.q.out
+++ b/ql/src/test/results/clientpositive/spark/auto_sortmerge_join_7.q.out
@@ -187,8 +187,10 @@ STAGE PLANS:
 #### A masked pattern was here ####
                     name default.bucket_big
                     numFiles 2
+                    numRows 0
                     partition_columns ds
                     partition_columns.types string
+                    rawDataSize 0
                     serialization.ddl struct bucket_big { string key, string value}
                     serialization.format 1
                     serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -232,8 +234,10 @@ STAGE PLANS:
 #### A masked pattern was here ####
                     name default.bucket_big
                     numFiles 2
+                    numRows 0
                     partition_columns ds
                     partition_columns.types string
+                    rawDataSize 0
                     serialization.ddl struct bucket_big { string key, string value}
                     serialization.format 1
                     serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -389,8 +393,10 @@ STAGE PLANS:
 #### A masked pattern was here ####
                     name default.bucket_big
                     numFiles 2
+                    numRows 0
                     partition_columns ds
                     partition_columns.types string
+                    rawDataSize 0
                     serialization.ddl struct bucket_big { string key, string value}
                     serialization.format 1
                     serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -434,8 +440,10 @@ STAGE PLANS:
 #### A masked pattern was here ####
                     name default.bucket_big
                     numFiles 2
+                    numRows 0
                     partition_columns ds
                     partition_columns.types string
+                    rawDataSize 0
                     serialization.ddl struct bucket_big { string key, string value}
                     serialization.format 1
                     serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -581,8 +589,10 @@ STAGE PLANS:
 #### A masked pattern was here ####
                     name default.bucket_small
                     numFiles 4
+                    numRows 0
                     partition_columns ds
                     partition_columns.types string
+                    rawDataSize 0
                     serialization.ddl struct bucket_small { string key, string value}
                     serialization.format 1
                     serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -626,8 +636,10 @@ STAGE PLANS:
 #### A masked pattern was here ####
                     name default.bucket_small
                     numFiles 4
+                    numRows 0
                     partition_columns ds
                     partition_columns.types string
+                    rawDataSize 0
                     serialization.ddl struct bucket_small { string key, string value}
                     serialization.format 1
                     serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -728,8 +740,10 @@ STAGE PLANS:
 #### A masked pattern was here ####
                     name default.bucket_big
                     numFiles 2
+                    numRows 0
                     partition_columns ds
                     partition_columns.types string
+                    rawDataSize 0
                     serialization.ddl struct bucket_big { string key, string value}
                     serialization.format 1
                     serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -773,8 +787,10 @@ STAGE PLANS:
 #### A masked pattern was here ####
                     name default.bucket_big
                     numFiles 2
+                    numRows 0
                     partition_columns ds
                     partition_columns.types string
+                    rawDataSize 0
                     serialization.ddl struct bucket_big { string key, string value}
                     serialization.format 1
                     serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe

http://git-wip-us.apache.org/repos/asf/hive/blob/244ce09c/ql/src/test/results/clientpositive/spark/auto_sortmerge_join_8.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/auto_sortmerge_join_8.q.out b/ql/src/test/results/clientpositive/spark/auto_sortmerge_join_8.q.out
index 5564ceb..51f71c0 100644
--- a/ql/src/test/results/clientpositive/spark/auto_sortmerge_join_8.q.out
+++ b/ql/src/test/results/clientpositive/spark/auto_sortmerge_join_8.q.out
@@ -187,8 +187,10 @@ STAGE PLANS:
 #### A masked pattern was here ####
                     name default.bucket_big
                     numFiles 4
+                    numRows 0
                     partition_columns ds
                     partition_columns.types string
+                    rawDataSize 0
                     serialization.ddl struct bucket_big { string key, string value}
                     serialization.format 1
                     serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -232,8 +234,10 @@ STAGE PLANS:
 #### A masked pattern was here ####
                     name default.bucket_big
                     numFiles 4
+                    numRows 0
                     partition_columns ds
                     partition_columns.types string
+                    rawDataSize 0
                     serialization.ddl struct bucket_big { string key, string value}
                     serialization.format 1
                     serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -389,8 +393,10 @@ STAGE PLANS:
 #### A masked pattern was here ####
                     name default.bucket_big
                     numFiles 4
+                    numRows 0
                     partition_columns ds
                     partition_columns.types string
+                    rawDataSize 0
                     serialization.ddl struct bucket_big { string key, string value}
                     serialization.format 1
                     serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -434,8 +440,10 @@ STAGE PLANS:
 #### A masked pattern was here ####
                     name default.bucket_big
                     numFiles 4
+                    numRows 0
                     partition_columns ds
                     partition_columns.types string
+                    rawDataSize 0
                     serialization.ddl struct bucket_big { string key, string value}
                     serialization.format 1
                     serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -583,8 +591,10 @@ STAGE PLANS:
 #### A masked pattern was here ####
                     name default.bucket_small
                     numFiles 2
+                    numRows 0
                     partition_columns ds
                     partition_columns.types string
+                    rawDataSize 0
                     serialization.ddl struct bucket_small { string key, string value}
                     serialization.format 1
                     serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -628,8 +638,10 @@ STAGE PLANS:
 #### A masked pattern was here ####
                     name default.bucket_small
                     numFiles 2
+                    numRows 0
                     partition_columns ds
                     partition_columns.types string
+                    rawDataSize 0
                     serialization.ddl struct bucket_small { string key, string value}
                     serialization.format 1
                     serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -730,8 +742,10 @@ STAGE PLANS:
 #### A masked pattern was here ####
                     name default.bucket_big
                     numFiles 4
+                    numRows 0
                     partition_columns ds
                     partition_columns.types string
+                    rawDataSize 0
                     serialization.ddl struct bucket_big { string key, string value}
                     serialization.format 1
                     serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -775,8 +789,10 @@ STAGE PLANS:
 #### A masked pattern was here ####
                     name default.bucket_big
                     numFiles 4
+                    numRows 0
                     partition_columns ds
                     partition_columns.types string
+                    rawDataSize 0
                     serialization.ddl struct bucket_big { string key, string value}
                     serialization.format 1
                     serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe