Posted to commits@hive.apache.org by se...@apache.org on 2018/06/27 02:05:34 UTC

[01/13] hive git commit: HIVE-19980: GenericUDTFGetSplits fails when order by query returns 0 rows (Prasanth Jayachandran reviewed by Jason Dere)

Repository: hive
Updated Branches:
  refs/heads/master-txnstats 61c55a3f6 -> 798ff7d24


HIVE-19980: GenericUDTFGetSplits fails when order by query returns 0 rows (Prasanth Jayachandran reviewed by Jason Dere)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/bd5d2b70
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/bd5d2b70
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/bd5d2b70

Branch: refs/heads/master-txnstats
Commit: bd5d2b70c2be194cada5c168421e22eb918fc684
Parents: b5160e7
Author: Prasanth Jayachandran <pr...@apache.org>
Authored: Mon Jun 25 22:09:40 2018 -0700
Committer: Prasanth Jayachandran <pr...@apache.org>
Committed: Mon Jun 25 22:09:40 2018 -0700

----------------------------------------------------------------------
 .../hive/jdbc/TestJdbcGenericUDTFGetSplits.java |  3 ++
 .../hive/ql/exec/tez/HiveSplitGenerator.java    | 40 ++++++++++++--------
 2 files changed, 27 insertions(+), 16 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/bd5d2b70/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcGenericUDTFGetSplits.java
----------------------------------------------------------------------
diff --git a/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcGenericUDTFGetSplits.java b/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcGenericUDTFGetSplits.java
index c8a428c..b94868b 100644
--- a/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcGenericUDTFGetSplits.java
+++ b/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcGenericUDTFGetSplits.java
@@ -124,6 +124,9 @@ public class TestJdbcGenericUDTFGetSplits {
     query = "select get_splits(" + "'select value from " + tableName + " order by under_col', 5)";
     runQuery(query, getConfigs(), 1);
 
+    query = "select get_splits(" + "'select value from " + tableName + " order by under_col limit 0', 5)";
+    runQuery(query, getConfigs(), 0);
+
     query = "select get_splits(" +
       "'select `value` from (select value from " + tableName + " where value is not null order by value) as t', 5)";
     runQuery(query, getConfigs(), 1);
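
The new assertion expects zero splits for the limit-0 query. For readers who want to reproduce this outside the test harness, a minimal sketch of driving get_splits through plain JDBC follows; the connection URL and the table name testtab are placeholders rather than part of this patch, and the Hive JDBC driver is assumed to be on the classpath.

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;

public class GetSplitsZeroRowsSketch {
  public static void main(String[] args) throws Exception {
    // get_splits is a UDTF: the result set carries one row per generated split.
    String sql = "select get_splits("
        + "'select value from testtab order by under_col limit 0', 5)";
    try (Connection conn = DriverManager.getConnection(
             "jdbc:hive2://localhost:10000/default", "hive", "");
         Statement stmt = conn.createStatement();
         ResultSet rs = stmt.executeQuery(sql)) {
      int splits = 0;
      while (rs.next()) {
        splits++;
      }
      // With this fix the call succeeds and reports 0 splits instead of failing.
      System.out.println("splits: " + splits);
    }
  }
}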

http://git-wip-us.apache.org/repos/asf/hive/blob/bd5d2b70/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/HiveSplitGenerator.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/HiveSplitGenerator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/HiveSplitGenerator.java
index 6daa8df..15c14c9 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/HiveSplitGenerator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/HiveSplitGenerator.java
@@ -210,26 +210,34 @@ public class HiveSplitGenerator extends InputInitializer {
         if (generateSingleSplit &&
           conf.get(HiveConf.ConfVars.HIVETEZINPUTFORMAT.varname).equals(HiveInputFormat.class.getName())) {
           MapWork mapWork = Utilities.getMapWork(jobConf);
-          splits = new InputSplit[1];
           List<Path> paths = Utilities.getInputPathsTez(jobConf, mapWork);
           FileSystem fs = paths.get(0).getFileSystem(jobConf);
           FileStatus[] fileStatuses = fs.listStatus(paths.get(0));
-          FileStatus fileStatus = fileStatuses[0];
-          Preconditions.checkState(paths.size() == 1 && fileStatuses.length == 1 &&
-              mapWork.getAliasToPartnInfo().size() == 1,
-            "Requested to generate single split. Paths and fileStatuses are expected to be 1. " +
-              "Got paths: " + paths.size() + " fileStatuses: " + fileStatuses.length);
-          BlockLocation[] locations = fs.getFileBlockLocations(fileStatus, 0, fileStatus.getLen());
-          Set<String> hostsSet = new HashSet<>();
-          for (BlockLocation location : locations) {
-            hostsSet.addAll(Lists.newArrayList(location.getHosts()));
+          if (fileStatuses.length == 0) {
+            // Single split generation typically happens when reading the output of an order by query.
+            // If the order by query returns no rows, no files will exist in the input path.
+            splits = new InputSplit[0];
+          } else {
+            // If files exist in the input path, there must be exactly one: this code path is
+            // triggered only for order by queries, which write a single file (from one reducer).
+            Preconditions.checkState(paths.size() == 1 && fileStatuses.length == 1 &&
+                mapWork.getAliasToPartnInfo().size() == 1,
+              "Requested to generate single split. Paths and fileStatuses are expected to be 1. " +
+                "Got paths: " + paths.size() + " fileStatuses: " + fileStatuses.length);
+            splits = new InputSplit[1];
+            FileStatus fileStatus = fileStatuses[0];
+            BlockLocation[] locations = fs.getFileBlockLocations(fileStatus, 0, fileStatus.getLen());
+            Set<String> hostsSet = new HashSet<>();
+            for (BlockLocation location : locations) {
+              hostsSet.addAll(Lists.newArrayList(location.getHosts()));
+            }
+            String[] hosts = hostsSet.toArray(new String[0]);
+            FileSplit fileSplit = new FileSplit(fileStatus.getPath(), 0, fileStatus.getLen(), hosts);
+            String alias = mapWork.getAliases().get(0);
+            PartitionDesc partDesc = mapWork.getAliasToPartnInfo().get(alias);
+            String partIF = partDesc.getInputFileFormatClassName();
+            splits[0] = new HiveInputFormat.HiveInputSplit(fileSplit, partIF);
           }
-          String[] hosts = hostsSet.toArray(new String[0]);
-          FileSplit fileSplit = new FileSplit(fileStatus.getPath(), 0, fileStatus.getLen(), hosts);
-          String alias = mapWork.getAliases().get(0);
-          PartitionDesc partDesc = mapWork.getAliasToPartnInfo().get(alias);
-          String partIF = partDesc.getInputFileFormatClassName();
-          splits[0] = new HiveInputFormat.HiveInputSplit(fileSplit, partIF);
         } else {
           // Raw splits
           splits = inputFormat.getSplits(jobConf, (int) (availableSlots * waves));
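
The shape of the fix can be illustrated against just the public Hadoop FS API. The sketch below is a simplified stand-in, not the patched method itself: the real code also validates paths and partition info and wraps the result in HiveInputFormat.HiveInputSplit.

import java.io.IOException;
import java.util.HashSet;
import java.util.Set;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapred.FileSplit;
import org.apache.hadoop.mapred.InputSplit;

public class SingleSplitSketch {
  // Returns one split covering the single file an order by query wrote,
  // or an empty array when the query produced no rows (and hence no file).
  static InputSplit[] generateSingleSplit(Configuration conf, Path inputDir)
      throws IOException {
    FileSystem fs = inputDir.getFileSystem(conf);
    FileStatus[] files = fs.listStatus(inputDir);
    if (files.length == 0) {
      // Zero rows -> zero files -> zero splits; the old code indexed
      // files[0] unconditionally and failed here.
      return new InputSplit[0];
    }
    FileStatus file = files[0]; // order by output: exactly one file
    Set<String> hosts = new HashSet<>();
    for (BlockLocation loc : fs.getFileBlockLocations(file, 0, file.getLen())) {
      for (String host : loc.getHosts()) {
        hosts.add(host);
      }
    }
    return new InputSplit[] {
      new FileSplit(file.getPath(), 0, file.getLen(), hosts.toArray(new String[0]))
    };
  }
}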


[11/13] hive git commit: HIVE-19532 : fix tests - update some out files on master-txnstats branch (Sergey Shelukhin)

Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/798ff7d2/ql/src/test/results/clientpositive/llap/results_cache_transactional.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/results_cache_transactional.q.out b/ql/src/test/results/clientpositive/llap/results_cache_transactional.q.out
index 19504bc..b57730a 100644
--- a/ql/src/test/results/clientpositive/llap/results_cache_transactional.q.out
+++ b/ql/src/test/results/clientpositive/llap/results_cache_transactional.q.out
@@ -56,19 +56,19 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: tab1_n1
-                  Statistics: Num rows: 91 Data size: 16192 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
                   Select Operator
                     expressions: key (type: string)
                     outputColumnNames: key
-                    Statistics: Num rows: 91 Data size: 16192 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
                     Group By Operator
                       aggregations: max(key)
                       mode: hash
                       outputColumnNames: _col0
-                      Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: COMPLETE
                       Reduce Output Operator
                         sort order: 
-                        Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: COMPLETE
                         value expressions: _col0 (type: string)
             Execution mode: vectorized, llap
             LLAP IO: may be used (ACID table)
@@ -79,10 +79,10 @@ STAGE PLANS:
                 aggregations: max(VALUE._col0)
                 mode: mergepartial
                 outputColumnNames: _col0
-                Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: COMPLETE
                 File Output Operator
                   compressed: false
-                  Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: COMPLETE
                   table:
                       input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                       output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@@ -152,19 +152,19 @@ STAGE PLANS:
                 TableScan
                   alias: tab1_n1
                   filterExpr: key is not null (type: boolean)
-                  Statistics: Num rows: 91 Data size: 16192 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
                   Filter Operator
                     predicate: key is not null (type: boolean)
-                    Statistics: Num rows: 87 Data size: 15480 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
                     Select Operator
                       expressions: key (type: string)
                       outputColumnNames: _col0
-                      Statistics: Num rows: 87 Data size: 15480 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
                       Reduce Output Operator
                         key expressions: _col0 (type: string)
                         sort order: +
                         Map-reduce partition columns: _col0 (type: string)
-                        Statistics: Num rows: 87 Data size: 15480 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
             Execution mode: vectorized, llap
             LLAP IO: may be used (ACID table)
         Map 4 
@@ -172,19 +172,19 @@ STAGE PLANS:
                 TableScan
                   alias: tab2_n1
                   filterExpr: key is not null (type: boolean)
-                  Statistics: Num rows: 91 Data size: 16192 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
                   Filter Operator
                     predicate: key is not null (type: boolean)
-                    Statistics: Num rows: 87 Data size: 15480 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
                     Select Operator
                       expressions: key (type: string)
                       outputColumnNames: _col0
-                      Statistics: Num rows: 87 Data size: 15480 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
                       Reduce Output Operator
                         key expressions: _col0 (type: string)
                         sort order: +
                         Map-reduce partition columns: _col0 (type: string)
-                        Statistics: Num rows: 87 Data size: 15480 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
             Execution mode: vectorized, llap
             LLAP IO: may be used (ACID table)
         Reducer 2 
@@ -196,15 +196,15 @@ STAGE PLANS:
                 keys:
                   0 _col0 (type: string)
                   1 _col0 (type: string)
-                Statistics: Num rows: 95 Data size: 17028 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 791 Data size: 6328 Basic stats: COMPLETE Column stats: COMPLETE
                 Group By Operator
                   aggregations: count()
                   mode: hash
                   outputColumnNames: _col0
-                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
                   Reduce Output Operator
                     sort order: 
-                    Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
                     value expressions: _col0 (type: bigint)
         Reducer 3 
             Execution mode: vectorized, llap
@@ -213,10 +213,10 @@ STAGE PLANS:
                 aggregations: count(VALUE._col0)
                 mode: mergepartial
                 outputColumnNames: _col0
-                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
                 File Output Operator
                   compressed: false
-                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
                   table:
                       input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                       output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@@ -429,19 +429,19 @@ STAGE PLANS:
                 TableScan
                   alias: tab1_n1
                   filterExpr: key is not null (type: boolean)
-                  Statistics: Num rows: 91 Data size: 16192 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
                   Filter Operator
                     predicate: key is not null (type: boolean)
-                    Statistics: Num rows: 87 Data size: 15480 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
                     Select Operator
                       expressions: key (type: string)
                       outputColumnNames: _col0
-                      Statistics: Num rows: 87 Data size: 15480 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
                       Reduce Output Operator
                         key expressions: _col0 (type: string)
                         sort order: +
                         Map-reduce partition columns: _col0 (type: string)
-                        Statistics: Num rows: 87 Data size: 15480 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
             Execution mode: vectorized, llap
             LLAP IO: may be used (ACID table)
         Map 4 
@@ -473,15 +473,15 @@ STAGE PLANS:
                 keys:
                   0 _col0 (type: string)
                   1 _col0 (type: string)
-                Statistics: Num rows: 550 Data size: 47850 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 791 Data size: 6328 Basic stats: COMPLETE Column stats: COMPLETE
                 Group By Operator
                   aggregations: count()
                   mode: hash
                   outputColumnNames: _col0
-                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
                   Reduce Output Operator
                     sort order: 
-                    Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
                     value expressions: _col0 (type: bigint)
         Reducer 3 
             Execution mode: vectorized, llap
@@ -490,10 +490,10 @@ STAGE PLANS:
                 aggregations: count(VALUE._col0)
                 mode: mergepartial
                 outputColumnNames: _col0
-                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
                 File Output Operator
                   compressed: false
-                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
                   table:
                       input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                       output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@@ -541,19 +541,19 @@ STAGE PLANS:
                 TableScan
                   alias: tab1_n1
                   filterExpr: key is not null (type: boolean)
-                  Statistics: Num rows: 91 Data size: 16192 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
                   Filter Operator
                     predicate: key is not null (type: boolean)
-                    Statistics: Num rows: 87 Data size: 15480 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
                     Select Operator
                       expressions: key (type: string)
                       outputColumnNames: _col0
-                      Statistics: Num rows: 87 Data size: 15480 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
                       Reduce Output Operator
                         key expressions: _col0 (type: string)
                         sort order: +
                         Map-reduce partition columns: _col0 (type: string)
-                        Statistics: Num rows: 87 Data size: 15480 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
             Execution mode: vectorized, llap
             LLAP IO: may be used (ACID table)
         Map 4 
@@ -585,15 +585,15 @@ STAGE PLANS:
                 keys:
                   0 _col0 (type: string)
                   1 _col0 (type: string)
-                Statistics: Num rows: 550 Data size: 47850 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 791 Data size: 6328 Basic stats: COMPLETE Column stats: COMPLETE
                 Group By Operator
                   aggregations: count()
                   mode: hash
                   outputColumnNames: _col0
-                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
                   Reduce Output Operator
                     sort order: 
-                    Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
                     value expressions: _col0 (type: bigint)
         Reducer 3 
             Execution mode: vectorized, llap
@@ -602,10 +602,10 @@ STAGE PLANS:
                 aggregations: count(VALUE._col0)
                 mode: mergepartial
                 outputColumnNames: _col0
-                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
                 File Output Operator
                   compressed: false
-                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
                   table:
                       input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                       output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat

http://git-wip-us.apache.org/repos/asf/hive/blob/798ff7d2/ql/src/test/results/clientpositive/mm_all.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/mm_all.q.out b/ql/src/test/results/clientpositive/mm_all.q.out
index e7df4c0..e5428bb 100644
--- a/ql/src/test/results/clientpositive/mm_all.q.out
+++ b/ql/src/test/results/clientpositive/mm_all.q.out
@@ -1829,6 +1829,7 @@ Retention:          	0
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
 Table Parameters:	 	 
+	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\"}}
 	bucketing_version   	2                   
 	numFiles            	1                   
 	numRows             	6                   
@@ -1879,6 +1880,7 @@ Retention:          	0
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
 Table Parameters:	 	 
+	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\"}}
 	bucketing_version   	2                   
 	numFiles            	2                   
 	numRows             	12                  
@@ -1937,7 +1939,7 @@ Retention:          	0
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
 Table Parameters:	 	 
-	COLUMN_STATS_ACCURATE	{}                  
+	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\"}
 	bucketing_version   	2                   
 	numFiles            	1                   
 	numRows             	500                 

http://git-wip-us.apache.org/repos/asf/hive/blob/798ff7d2/ql/src/test/results/clientpositive/mm_default.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/mm_default.q.out b/ql/src/test/results/clientpositive/mm_default.q.out
index 4ba6aa5..5a85554 100644
--- a/ql/src/test/results/clientpositive/mm_default.q.out
+++ b/ql/src/test/results/clientpositive/mm_default.q.out
@@ -180,7 +180,7 @@ Retention:          	0
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
 Table Parameters:	 	 
-	COLUMN_STATS_ACCURATE	{}                  
+	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\"}
 	bucketing_version   	2                   
 	numFiles            	1                   
 	numRows             	1                   

http://git-wip-us.apache.org/repos/asf/hive/blob/798ff7d2/ql/src/test/results/clientpositive/row__id.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/row__id.q.out b/ql/src/test/results/clientpositive/row__id.q.out
index e83b590..7d29666 100644
--- a/ql/src/test/results/clientpositive/row__id.q.out
+++ b/ql/src/test/results/clientpositive/row__id.q.out
@@ -62,24 +62,24 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: hello_acid
-            Statistics: Num rows: 78 Data size: 19860 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 3 Data size: 19860 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: ROW__ID.writeid (type: bigint)
               outputColumnNames: _col0
-              Statistics: Num rows: 78 Data size: 19860 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 3 Data size: 19860 Basic stats: COMPLETE Column stats: NONE
               Reduce Output Operator
                 key expressions: _col0 (type: bigint)
                 sort order: +
-                Statistics: Num rows: 78 Data size: 19860 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 3 Data size: 19860 Basic stats: COMPLETE Column stats: NONE
       Execution mode: vectorized
       Reduce Operator Tree:
         Select Operator
           expressions: KEY.reducesinkkey0 (type: bigint)
           outputColumnNames: _col0
-          Statistics: Num rows: 78 Data size: 19860 Basic stats: COMPLETE Column stats: NONE
+          Statistics: Num rows: 3 Data size: 19860 Basic stats: COMPLETE Column stats: NONE
           File Output Operator
             compressed: false
-            Statistics: Num rows: 78 Data size: 19860 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 3 Data size: 19860 Basic stats: COMPLETE Column stats: NONE
             table:
                 input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                 output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@@ -125,17 +125,17 @@ STAGE PLANS:
           TableScan
             alias: hello_acid
             filterExpr: (ROW__ID.writeid = 3) (type: boolean)
-            Statistics: Num rows: 78 Data size: 19860 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 3 Data size: 19860 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
               predicate: (ROW__ID.writeid = 3) (type: boolean)
-              Statistics: Num rows: 39 Data size: 9930 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 1 Data size: 6620 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: ROW__ID.writeid (type: bigint)
                 outputColumnNames: _col0
-                Statistics: Num rows: 39 Data size: 9930 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 1 Data size: 6620 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
-                  Statistics: Num rows: 39 Data size: 9930 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 1 Data size: 6620 Basic stats: COMPLETE Column stats: NONE
                   table:
                       input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                       output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat

http://git-wip-us.apache.org/repos/asf/hive/blob/798ff7d2/ql/src/test/results/clientpositive/stats_nonpart.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/stats_nonpart.q.out b/ql/src/test/results/clientpositive/stats_nonpart.q.out
index 7bf9943..7df570a 100644
--- a/ql/src/test/results/clientpositive/stats_nonpart.q.out
+++ b/ql/src/test/results/clientpositive/stats_nonpart.q.out
@@ -117,6 +117,7 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: mysource
+            filterExpr: (p = 100) (type: boolean)
             Statistics: Num rows: 6 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE
             Filter Operator
               predicate: (p = 100) (type: boolean)
@@ -212,7 +213,7 @@ Table Parameters:
 	numFiles            	1                   
 	numRows             	2                   
 	rawDataSize         	0                   
-	totalSize           	720                 
+	totalSize           	719                 
 	transactional       	true                
 	transactional_properties	default             
 #### A masked pattern was here ####

http://git-wip-us.apache.org/repos/asf/hive/blob/798ff7d2/ql/src/test/results/clientpositive/stats_part.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/stats_part.q.out b/ql/src/test/results/clientpositive/stats_part.q.out
index 8760dad..51bdfab 100644
--- a/ql/src/test/results/clientpositive/stats_part.q.out
+++ b/ql/src/test/results/clientpositive/stats_part.q.out
@@ -72,6 +72,7 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: stats_part
+            filterExpr: (p > 100) (type: boolean)
             Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: PARTIAL
             Filter Operator
               predicate: (p > 100) (type: boolean)
@@ -209,7 +210,7 @@ Table Parameters:
 	numPartitions       	3                   
 	numRows             	6                   
 	rawDataSize         	0                   
-	totalSize           	2241                
+	totalSize           	2244                
 	transactional       	true                
 	transactional_properties	default             
 #### A masked pattern was here ####
@@ -272,7 +273,7 @@ Table Parameters:
 	numPartitions       	3                   
 	numRows             	8                   
 	rawDataSize         	0                   
-	totalSize           	2994                
+	totalSize           	2998                
 	transactional       	true                
 	transactional_properties	default             
 #### A masked pattern was here ####
@@ -415,7 +416,7 @@ Table Parameters:
 	numPartitions       	3                   
 	numRows             	8                   
 	rawDataSize         	0                   
-	totalSize           	2994                
+	totalSize           	2998                
 	transactional       	true                
 	transactional_properties	default             
 #### A masked pattern was here ####
@@ -498,7 +499,7 @@ Table Parameters:
 	numPartitions       	3                   
 	numRows             	8                   
 	rawDataSize         	0                   
-	totalSize           	2994                
+	totalSize           	2998                
 	transactional       	true                
 	transactional_properties	default             
 #### A masked pattern was here ####

http://git-wip-us.apache.org/repos/asf/hive/blob/798ff7d2/ql/src/test/results/clientpositive/stats_part2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/stats_part2.q.out b/ql/src/test/results/clientpositive/stats_part2.q.out
index 94e186d..9c22ce7 100644
--- a/ql/src/test/results/clientpositive/stats_part2.q.out
+++ b/ql/src/test/results/clientpositive/stats_part2.q.out
@@ -58,6 +58,7 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: stats_part
+            filterExpr: (p > 100) (type: boolean)
             Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
             Filter Operator
               predicate: (p > 100) (type: boolean)
@@ -108,6 +109,7 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: stats_part
+            filterExpr: (p > 100) (type: boolean)
             Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: PARTIAL
             Filter Operator
               predicate: (p > 100) (type: boolean)
@@ -245,7 +247,7 @@ Table Parameters:
 	numPartitions       	3                   
 	numRows             	6                   
 	rawDataSize         	0                   
-	totalSize           	2337                
+	totalSize           	2335                
 	transactional       	true                
 	transactional_properties	default             
 #### A masked pattern was here ####
@@ -336,7 +338,7 @@ Table Parameters:
 	numPartitions       	3                   
 	numRows             	8                   
 	rawDataSize         	0                   
-	totalSize           	3126                
+	totalSize           	3124                
 	transactional       	true                
 	transactional_properties	default             
 #### A masked pattern was here ####
@@ -476,7 +478,7 @@ Partition Parameters:
 	numFiles            	1                   
 	numRows             	2                   
 	rawDataSize         	0                   
-	totalSize           	758                 
+	totalSize           	756                 
 #### A masked pattern was here ####
 	 	 
 # Storage Information	 	 
@@ -591,19 +593,20 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: stats_part
-            Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: COMPLETE
+            filterExpr: (p > 100) (type: boolean)
+            Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: PARTIAL
             Select Operator
               expressions: key (type: int)
               outputColumnNames: key
-              Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: COMPLETE
+              Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: PARTIAL
               Group By Operator
                 aggregations: max(key)
                 mode: hash
                 outputColumnNames: _col0
-                Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
+                Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: PARTIAL
                 Reduce Output Operator
                   sort order: 
-                  Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: PARTIAL
                   value expressions: _col0 (type: int)
       Execution mode: vectorized
       Reduce Operator Tree:
@@ -611,10 +614,10 @@ STAGE PLANS:
           aggregations: max(VALUE._col0)
           mode: mergepartial
           outputColumnNames: _col0
-          Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
+          Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: PARTIAL
           File Output Operator
             compressed: false
-            Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
+            Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: PARTIAL
             table:
                 input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                 output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@@ -663,7 +666,7 @@ Partition Parameters:
 	numFiles            	1                   
 	numRows             	2                   
 	rawDataSize         	0                   
-	totalSize           	758                 
+	totalSize           	756                 
 #### A masked pattern was here ####
 	 	 
 # Storage Information	 	 
@@ -700,7 +703,7 @@ Partition Parameters:
 	numFiles            	3                   
 	numRows             	2                   
 	rawDataSize         	0                   
-	totalSize           	2238                
+	totalSize           	2235                
 #### A masked pattern was here ####
 	 	 
 # Storage Information	 	 
@@ -803,7 +806,7 @@ Partition Parameters:
 	numFiles            	1                   
 	numRows             	2                   
 	rawDataSize         	0                   
-	totalSize           	758                 
+	totalSize           	756                 
 #### A masked pattern was here ####
 	 	 
 # Storage Information	 	 
@@ -840,7 +843,7 @@ Partition Parameters:
 	numFiles            	3                   
 	numRows             	2                   
 	rawDataSize         	0                   
-	totalSize           	2238                
+	totalSize           	2235                
 #### A masked pattern was here ####
 	 	 
 # Storage Information	 	 
@@ -947,7 +950,7 @@ Partition Parameters:
 	numFiles            	2                   
 	numRows             	1                   
 	rawDataSize         	0                   
-	totalSize           	1366                
+	totalSize           	1453                
 #### A masked pattern was here ####
 	 	 
 # Storage Information	 	 
@@ -984,7 +987,7 @@ Partition Parameters:
 	numFiles            	4                   
 	numRows             	1                   
 	rawDataSize         	0                   
-	totalSize           	2837                
+	totalSize           	2929                
 #### A masked pattern was here ####
 	 	 
 # Storage Information	 	 
@@ -1181,19 +1184,20 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: stats_part
-            Statistics: Num rows: 5 Data size: 20 Basic stats: COMPLETE Column stats: COMPLETE
+            filterExpr: (p > 100) (type: boolean)
+            Statistics: Num rows: 5 Data size: 20 Basic stats: COMPLETE Column stats: PARTIAL
             Select Operator
               expressions: key (type: int)
               outputColumnNames: key
-              Statistics: Num rows: 5 Data size: 20 Basic stats: COMPLETE Column stats: COMPLETE
+              Statistics: Num rows: 5 Data size: 20 Basic stats: COMPLETE Column stats: PARTIAL
               Group By Operator
                 aggregations: max(key)
                 mode: hash
                 outputColumnNames: _col0
-                Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
+                Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: PARTIAL
                 Reduce Output Operator
                   sort order: 
-                  Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: PARTIAL
                   value expressions: _col0 (type: int)
       Execution mode: vectorized
       Reduce Operator Tree:
@@ -1201,10 +1205,10 @@ STAGE PLANS:
           aggregations: max(VALUE._col0)
           mode: mergepartial
           outputColumnNames: _col0
-          Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
+          Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: PARTIAL
           File Output Operator
             compressed: false
-            Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
+            Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: PARTIAL
             table:
                 input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                 output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat

http://git-wip-us.apache.org/repos/asf/hive/blob/798ff7d2/ql/src/test/results/clientpositive/stats_sizebug.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/stats_sizebug.q.out b/ql/src/test/results/clientpositive/stats_sizebug.q.out
index b1bbf94..648a9fa 100644
--- a/ql/src/test/results/clientpositive/stats_sizebug.q.out
+++ b/ql/src/test/results/clientpositive/stats_sizebug.q.out
@@ -57,6 +57,7 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: mysource
+            filterExpr: (p = 100) (type: boolean)
             Statistics: Num rows: 6 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE
             Filter Operator
               predicate: (p = 100) (type: boolean)
@@ -152,7 +153,7 @@ Table Parameters:
 	numFiles            	1                   
 	numRows             	2                   
 	rawDataSize         	0                   
-	totalSize           	719                 
+	totalSize           	718                 
 	transactional       	true                
 	transactional_properties	default             
 #### A masked pattern was here ####
@@ -199,7 +200,7 @@ Table Parameters:
 	numFiles            	1                   
 	numRows             	2                   
 	rawDataSize         	0                   
-	totalSize           	719                 
+	totalSize           	718                 
 	transactional       	true                
 	transactional_properties	default             
 #### A masked pattern was here ####

http://git-wip-us.apache.org/repos/asf/hive/blob/798ff7d2/ql/src/test/results/clientpositive/tez/acid_vectorization_original_tez.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/acid_vectorization_original_tez.q.out b/ql/src/test/results/clientpositive/tez/acid_vectorization_original_tez.q.out
index 88499fd..b400361 100644
--- a/ql/src/test/results/clientpositive/tez/acid_vectorization_original_tez.q.out
+++ b/ql/src/test/results/clientpositive/tez/acid_vectorization_original_tez.q.out
@@ -680,22 +680,22 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: over10k_orc_bucketed_n0
-                  Statistics: Num rows: 1237 Data size: 707670 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 2098 Data size: 622340 Basic stats: COMPLETE Column stats: COMPLETE
                   Select Operator
                     expressions: ROW__ID (type: struct<writeid:bigint,bucketid:int,rowid:bigint>)
                     outputColumnNames: ROW__ID
-                    Statistics: Num rows: 1237 Data size: 707670 Basic stats: COMPLETE Column stats: COMPLETE
+                    Statistics: Num rows: 2098 Data size: 622340 Basic stats: COMPLETE Column stats: COMPLETE
                     Group By Operator
                       aggregations: count()
                       keys: ROW__ID (type: struct<writeid:bigint,bucketid:int,rowid:bigint>)
                       mode: hash
                       outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 618 Data size: 51912 Basic stats: COMPLETE Column stats: COMPLETE
+                      Statistics: Num rows: 1049 Data size: 88116 Basic stats: COMPLETE Column stats: COMPLETE
                       Reduce Output Operator
                         key expressions: _col0 (type: struct<writeid:bigint,bucketid:int,rowid:bigint>)
                         sort order: +
                         Map-reduce partition columns: _col0 (type: struct<writeid:bigint,bucketid:int,rowid:bigint>)
-                        Statistics: Num rows: 618 Data size: 51912 Basic stats: COMPLETE Column stats: COMPLETE
+                        Statistics: Num rows: 1049 Data size: 88116 Basic stats: COMPLETE Column stats: COMPLETE
                         value expressions: _col1 (type: bigint)
         Reducer 2 
             Reduce Operator Tree:
@@ -704,13 +704,13 @@ STAGE PLANS:
                 keys: KEY._col0 (type: struct<writeid:bigint,bucketid:int,rowid:bigint>)
                 mode: mergepartial
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 618 Data size: 51912 Basic stats: COMPLETE Column stats: COMPLETE
+                Statistics: Num rows: 1049 Data size: 88116 Basic stats: COMPLETE Column stats: COMPLETE
                 Filter Operator
                   predicate: (_col1 > 1L) (type: boolean)
-                  Statistics: Num rows: 206 Data size: 17304 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 349 Data size: 29316 Basic stats: COMPLETE Column stats: COMPLETE
                   File Output Operator
                     compressed: false
-                    Statistics: Num rows: 206 Data size: 17304 Basic stats: COMPLETE Column stats: COMPLETE
+                    Statistics: Num rows: 349 Data size: 29316 Basic stats: COMPLETE Column stats: COMPLETE
                     table:
                         input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                         output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat

http://git-wip-us.apache.org/repos/asf/hive/blob/798ff7d2/ql/src/test/results/clientpositive/tez/explainanalyze_5.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/explainanalyze_5.q.out b/ql/src/test/results/clientpositive/tez/explainanalyze_5.q.out
index bab0d24..5a50431 100644
--- a/ql/src/test/results/clientpositive/tez/explainanalyze_5.q.out
+++ b/ql/src/test/results/clientpositive/tez/explainanalyze_5.q.out
@@ -295,17 +295,17 @@ Stage-3
               Reducer 2
               File Output Operator [FS_8]
                 table:{"name:":"default.acid_uami_n2"}
-                Select Operator [SEL_4] (rows=10/2 width=316)
+                Select Operator [SEL_4] (rows=2/2 width=302)
                   Output:["_col0","_col1","_col2","_col3"]
                 <-Map 1 [SIMPLE_EDGE]
                   SHUFFLE [RS_3]
                     PartitionCols:UDFToInteger(_col0)
-                    Select Operator [SEL_2] (rows=10/2 width=316)
+                    Select Operator [SEL_2] (rows=2/2 width=302)
                       Output:["_col0","_col1","_col3"]
-                      Filter Operator [FIL_9] (rows=10/2 width=316)
+                      Filter Operator [FIL_9] (rows=2/2 width=226)
                         predicate:((de = 109.23) or (de = 119.23))
-                        TableScan [TS_0] (rows=85/4 width=316)
-                          default@acid_uami_n2,acid_uami_n2, ACID table,Tbl:COMPLETE,Col:NONE,Output:["i","de","vc"]
+                        TableScan [TS_0] (rows=4/4 width=226)
+                          default@acid_uami_n2,acid_uami_n2, ACID table,Tbl:COMPLETE,Col:COMPLETE,Output:["i","de","vc"]
 
 PREHOOK: query: select * from acid_uami_n2 order by de
 PREHOOK: type: QUERY


[03/13] hive git commit: HIVE-19481 : Tablesample uses incorrect logic to pick files corresponding to buckets. (Deepak Jaiswal, reviewed by Sergey Shelukhin)

Posted by se...@apache.org.
HIVE-19481 : Tablesample uses incorrect logic to pick files corresponding to buckets. (Deepak Jaiswal, reviewed by Sergey Shelukhin)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/eaf416ea
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/eaf416ea
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/eaf416ea

Branch: refs/heads/master-txnstats
Commit: eaf416ea1de146fef0bb3349d811282d89b9ec0e
Parents: bd5d2b7
Author: Deepak Jaiswal <dj...@apache.org>
Authored: Mon Jun 25 23:02:21 2018 -0700
Committer: Deepak Jaiswal <dj...@apache.org>
Committed: Mon Jun 25 23:02:21 2018 -0700

----------------------------------------------------------------------
 .../test/resources/testconfiguration.properties |   1 +
 .../hadoop/hive/ql/metadata/Partition.java      |  13 +-
 .../hadoop/hive/ql/optimizer/SamplePruner.java  |   7 +-
 .../test/queries/clientpositive/sample10_mm.q   |  34 +
 .../archive_excludeHadoop20.q.out               | 135 +++
 .../clientpositive/beeline/smb_mapjoin_11.q.out |   8 +-
 .../results/clientpositive/llap/sample10.q.out  |  20 +-
 .../clientpositive/llap/sample10_mm.q.out       | 346 ++++++++
 .../test/results/clientpositive/masking_5.q.out | 124 +++
 .../test/results/clientpositive/sample6.q.out   | 846 +++++++++++++++++--
 .../test/results/clientpositive/sample7.q.out   | 114 ++-
 .../test/results/clientpositive/sample9.q.out   | 258 +++++-
 .../results/clientpositive/smb_mapjoin_11.q.out |   8 +-
 .../infer_bucket_sort_bucketed_table.q.out      |   2 +-
 .../results/clientpositive/spark/sample10.q.out |  16 +-
 .../results/clientpositive/spark/sample2.q.out  |   4 +-
 .../results/clientpositive/spark/sample4.q.out  |   4 +-
 .../results/clientpositive/spark/sample6.q.out  | 126 +--
 .../results/clientpositive/spark/sample7.q.out  |   4 +-
 19 files changed, 1909 insertions(+), 161 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/eaf416ea/itests/src/test/resources/testconfiguration.properties
----------------------------------------------------------------------
diff --git a/itests/src/test/resources/testconfiguration.properties b/itests/src/test/resources/testconfiguration.properties
index 793d8cd..9f25a9b 100644
--- a/itests/src/test/resources/testconfiguration.properties
+++ b/itests/src/test/resources/testconfiguration.properties
@@ -611,6 +611,7 @@ minillaplocal.query.files=\
   results_cache_transactional.q,\
   results_cache_with_masking.q,\
   sample10.q,\
+  sample10_mm.q,\
   schema_evol_orc_acid_part_llap_io.q,\
   schema_evol_orc_acid_part.q,\
   schema_evol_orc_acid_part_update_llap_io.q,\

http://git-wip-us.apache.org/repos/asf/hive/blob/eaf416ea/ql/src/java/org/apache/hadoop/hive/ql/metadata/Partition.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Partition.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Partition.java
index 9dbd869..136709c 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Partition.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Partition.java
@@ -379,7 +379,18 @@ public class Partition implements Serializable {
     if (srcs == null) {
       return null;
     }
-    return srcs[bucketNum].getPath();
+
+    // Compute the bucket id of each file in srcs and return the first match.
+    for (FileStatus src : srcs) {
+      String bucketName = src.getPath().getName();
+      String bucketIdStr = Utilities.getBucketFileNameFromPathSubString(bucketName);
+      int bucketId = Utilities.getBucketIdFromFile(bucketIdStr);
+      if (bucketId == bucketNum) {
+        // match, return
+        return src.getPath();
+      }
+    }
+    return null;
   }
 
   @SuppressWarnings("nls")
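
The old code indexed srcs[bucketNum] positionally, which breaks whenever a bucket wrote no file and the listing is sparse; the new loop derives each file's bucket id from its name instead. A standalone sketch of that idea, assuming the conventional zero-padded NNNNNN_M bucket file naming; the regex here is illustrative, and Hive's Utilities methods do the real parsing.

import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class BucketFileMatchSketch {
  // Conventional Hive bucket file names look like "000001_0":
  // zero-padded bucket id, underscore, copy/attempt suffix.
  private static final Pattern BUCKET_FILE = Pattern.compile("^(\\d+)_\\d+.*");

  // Returns the index of the first file encoding bucketNum, or -1 if the
  // bucket wrote no file (e.g. it received no rows).
  static int findBucketFile(String[] fileNames, int bucketNum) {
    for (int i = 0; i < fileNames.length; i++) {
      Matcher m = BUCKET_FILE.matcher(fileNames[i]);
      if (m.matches() && Integer.parseInt(m.group(1)) == bucketNum) {
        return i;
      }
    }
    return -1;
  }

  public static void main(String[] args) {
    // Bucket 1 wrote no file, so positional indexing (files[2]) would be wrong.
    String[] files = {"000000_0", "000002_0"};
    System.out.println(findBucketFile(files, 2)); // 1: found by name, not position
    System.out.println(findBucketFile(files, 1)); // -1: no file for this bucket
  }
}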

http://git-wip-us.apache.org/repos/asf/hive/blob/eaf416ea/ql/src/java/org/apache/hadoop/hive/ql/optimizer/SamplePruner.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/SamplePruner.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/SamplePruner.java
index 8200e6a..75bce63 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/SamplePruner.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/SamplePruner.java
@@ -27,6 +27,7 @@ import java.util.LinkedHashMap;
 import java.util.Map;
 import java.util.Stack;
 
+import org.apache.hadoop.hive.metastore.TableType;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.apache.hadoop.fs.FileStatus;
@@ -192,8 +193,8 @@ public class SamplePruner extends Transform {
 
     // check if input pruning is possible
     // TODO: this code is buggy - it relies on having one file per bucket; no MM support (by design).
-    boolean isMmTable = AcidUtils.isInsertOnlyTable(part.getTable().getParameters());
-    if (sampleDescr.getInputPruning() && !isMmTable) {
+    boolean isManagedTable = part.getTable().getTableType() == TableType.MANAGED_TABLE;
+    if (sampleDescr.getInputPruning() && !isManagedTable) {
       LOG.trace("numerator = " + num);
       LOG.trace("denominator = " + den);
       LOG.trace("bucket count = " + bucketCount);
@@ -220,7 +221,7 @@ public class SamplePruner extends Transform {
       }
     } else {
       // need to do full scan
-      fullScanMsg = isMmTable ? "MM table" : "Tablesample not on clustered columns";
+      fullScanMsg = isManagedTable ? "Managed table" : "Tablesample not on clustered columns";
     }
     LOG.warn(fullScanMsg + ", using full table scan");
     Path[] ret = part.getPath();
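
Reduced to its inputs, the revised check is a small predicate: input pruning assumes exactly one file per bucket, and the patch broadens the bail-out from insert-only (MM) tables to all managed tables, which now take the full-scan path. A minimal sketch with a simplified stand-in for Hive's TableType enum.

public class SamplePruneDecisionSketch {
  // Simplified stand-in for org.apache.hadoop.hive.metastore.TableType.
  enum TableType { MANAGED_TABLE, EXTERNAL_TABLE }

  // Input pruning selects individual bucket files, which is only safe when
  // the layout guarantees one file per bucket; managed (ACID/MM) table
  // directories no longer guarantee that, so they fall back to a full scan.
  static boolean canPruneInput(boolean inputPruningRequested, TableType type) {
    return inputPruningRequested && type != TableType.MANAGED_TABLE;
  }

  public static void main(String[] args) {
    System.out.println(canPruneInput(true, TableType.EXTERNAL_TABLE)); // true
    System.out.println(canPruneInput(true, TableType.MANAGED_TABLE));  // false
  }
}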

http://git-wip-us.apache.org/repos/asf/hive/blob/eaf416ea/ql/src/test/queries/clientpositive/sample10_mm.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/sample10_mm.q b/ql/src/test/queries/clientpositive/sample10_mm.q
new file mode 100644
index 0000000..f653e67
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/sample10_mm.q
@@ -0,0 +1,34 @@
+--! qt:dataset:srcpart
+set hive.mapred.mode=nonstrict;
+set hive.exec.submitviachild=false;
+set hive.exec.submit.local.task.via.child=false;
+set hive.exec.dynamic.partition=true;
+set hive.exec.dynamic.partition.mode=nonstrict;
+
+set hive.exec.reducers.max=4;
+set hive.input.format=org.apache.hadoop.hive.ql.io.CombineHiveInputFormat;
+set hive.default.fileformat=RCFILE;
+set hive.exec.pre.hooks = org.apache.hadoop.hive.ql.hooks.PreExecutePrinter,org.apache.hadoop.hive.ql.hooks.EnforceReadOnlyTables,org.apache.hadoop.hive.ql.hooks.UpdateInputAccessTimeHook$PreExec;
+
+set hive.support.concurrency=true;
+set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
+
+create table srcpartbucket (key string, value string) partitioned by (ds string, hr string) clustered by (key) into 4 buckets stored as orc tblproperties ("transactional"="true", "transactional_properties"="insert_only");
+
+insert overwrite table srcpartbucket partition(ds, hr) select * from srcpart where ds is not null and key < 10;
+
+
+select * from srcpartbucket;
+explain select key from srcpartbucket tablesample (bucket 2 out of 4 on key);
+select key from srcpartbucket tablesample (bucket 1 out of 4 on key);
+select key from srcpartbucket tablesample (bucket 2 out of 4 on key);
+select key from srcpartbucket tablesample (bucket 3 out of 4 on key);
+select key from srcpartbucket tablesample (bucket 4 out of 4 on key);
+
+explain
+        select key from srcpartbucket tablesample (bucket 2 out of 4 on key) group by key;
+select key from srcpartbucket tablesample (bucket 1 out of 4 on key) group by key;
+select key from srcpartbucket tablesample (bucket 2 out of 4 on key) group by key;
+select key from srcpartbucket tablesample (bucket 3 out of 4 on key) group by key;
+select key from srcpartbucket tablesample (bucket 4 out of 4 on key) group by key;
+

http://git-wip-us.apache.org/repos/asf/hive/blob/eaf416ea/ql/src/test/results/clientpositive/archive_excludeHadoop20.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/archive_excludeHadoop20.q.out b/ql/src/test/results/clientpositive/archive_excludeHadoop20.q.out
index e4b390c..140da7b 100644
--- a/ql/src/test/results/clientpositive/archive_excludeHadoop20.q.out
+++ b/ql/src/test/results/clientpositive/archive_excludeHadoop20.q.out
@@ -215,7 +215,52 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@harbucket
 POSTHOOK: Input: default@harbucket@ds=1
 #### A masked pattern was here ####
+51
+51
+54
+69
+96
+133
+162
+163
+165
+165
+237
+237
+238
+238
+256
+256
 260
+289
+311
+311
+311
+332
+344
+344
+362
+369
+369
+369
+393
+397
+397
+407
+411
+432
+435
+453
+454
+454
+454
+466
+466
+466
+484
+498
+498
+498
 PREHOOK: query: ALTER TABLE tstsrcpart_n2 ARCHIVE PARTITION (ds='2008-04-08', hr='12')
 PREHOOK: type: ALTERTABLE_ARCHIVE
 PREHOOK: Input: default@tstsrcpart_n2
@@ -234,7 +279,52 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@harbucket
 POSTHOOK: Input: default@harbucket@ds=1
 #### A masked pattern was here ####
+51
+51
+54
+69
+96
+133
+162
+163
+165
+165
+237
+237
+238
+238
+256
+256
 260
+289
+311
+311
+311
+332
+344
+344
+362
+369
+369
+369
+393
+397
+397
+407
+411
+432
+435
+453
+454
+454
+454
+466
+466
+466
+484
+498
+498
+498
 PREHOOK: query: ALTER TABLE tstsrcpart_n2 UNARCHIVE PARTITION (ds='2008-04-08', hr='12')
 PREHOOK: type: ALTERTABLE_UNARCHIVE
 PREHOOK: Input: default@tstsrcpart_n2
@@ -253,7 +343,52 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@harbucket
 POSTHOOK: Input: default@harbucket@ds=1
 #### A masked pattern was here ####
+51
+51
+54
+69
+96
+133
+162
+163
+165
+165
+237
+237
+238
+238
+256
+256
 260
+289
+311
+311
+311
+332
+344
+344
+362
+369
+369
+369
+393
+397
+397
+407
+411
+432
+435
+453
+454
+454
+454
+466
+466
+466
+484
+498
+498
+498
 PREHOOK: query: CREATE TABLE old_name(key INT)
 PARTITIONED by (ds STRING)
 PREHOOK: type: CREATETABLE

http://git-wip-us.apache.org/repos/asf/hive/blob/eaf416ea/ql/src/test/results/clientpositive/beeline/smb_mapjoin_11.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/beeline/smb_mapjoin_11.q.out b/ql/src/test/results/clientpositive/beeline/smb_mapjoin_11.q.out
index 9f946e0..de02982 100644
--- a/ql/src/test/results/clientpositive/beeline/smb_mapjoin_11.q.out
+++ b/ql/src/test/results/clientpositive/beeline/smb_mapjoin_11.q.out
@@ -1932,7 +1932,7 @@ STAGE PLANS:
       Path -> Partition:
 #### A masked pattern was here ####
           Partition
-            base file name: 000001_0
+            base file name: ds=1
             input format: org.apache.hadoop.mapred.TextInputFormat
             output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
             partition values:
@@ -1982,7 +1982,7 @@ STAGE PLANS:
               name: default.test_table1_n1
             name: default.test_table1_n1
       Truncated Path -> Alias:
-        /test_table1_n1/ds=1/000001_0 [test_table1_n1]
+        /test_table1_n1/ds=1 [test_table1_n1]
 
   Stage: Stage-0
     Fetch Operator
@@ -2043,7 +2043,7 @@ STAGE PLANS:
       Path -> Partition:
 #### A masked pattern was here ####
           Partition
-            base file name: 000001_0
+            base file name: ds=1
             input format: org.apache.hadoop.mapred.TextInputFormat
             output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
             partition values:
@@ -2092,7 +2092,7 @@ STAGE PLANS:
               name: default.test_table3_n1
             name: default.test_table3_n1
       Truncated Path -> Alias:
-        /test_table3_n1/ds=1/000001_0 [test_table3_n1]
+        /test_table3_n1/ds=1 [test_table3_n1]
 
   Stage: Stage-0
     Fetch Operator

http://git-wip-us.apache.org/repos/asf/hive/blob/eaf416ea/ql/src/test/results/clientpositive/llap/sample10.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/sample10.q.out b/ql/src/test/results/clientpositive/llap/sample10.q.out
index ce3c288..66214ec 100644
--- a/ql/src/test/results/clientpositive/llap/sample10.q.out
+++ b/ql/src/test/results/clientpositive/llap/sample10.q.out
@@ -90,7 +90,7 @@ STAGE PLANS:
             Path -> Partition:
 #### A masked pattern was here ####
                 Partition
-                  base file name: 000002_0
+                  base file name: hr=11
                   input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
                   output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
                   partition values:
@@ -141,7 +141,7 @@ STAGE PLANS:
                   name: default.srcpartbucket
 #### A masked pattern was here ####
                 Partition
-                  base file name: 000002_0
+                  base file name: hr=12
                   input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
                   output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
                   partition values:
@@ -192,7 +192,7 @@ STAGE PLANS:
                   name: default.srcpartbucket
 #### A masked pattern was here ####
                 Partition
-                  base file name: 000002_0
+                  base file name: hr=11
                   input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
                   output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
                   partition values:
@@ -243,7 +243,7 @@ STAGE PLANS:
                   name: default.srcpartbucket
 #### A masked pattern was here ####
                 Partition
-                  base file name: 000002_0
+                  base file name: hr=12
                   input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
                   output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
                   partition values:
@@ -293,10 +293,10 @@ STAGE PLANS:
                     name: default.srcpartbucket
                   name: default.srcpartbucket
             Truncated Path -> Alias:
-              /srcpartbucket/ds=2008-04-08/hr=11/000002_0 [srcpartbucket]
-              /srcpartbucket/ds=2008-04-08/hr=12/000002_0 [srcpartbucket]
-              /srcpartbucket/ds=2008-04-09/hr=11/000002_0 [srcpartbucket]
-              /srcpartbucket/ds=2008-04-09/hr=12/000002_0 [srcpartbucket]
+              /srcpartbucket/ds=2008-04-08/hr=11 [srcpartbucket]
+              /srcpartbucket/ds=2008-04-08/hr=12 [srcpartbucket]
+              /srcpartbucket/ds=2008-04-09/hr=11 [srcpartbucket]
+              /srcpartbucket/ds=2008-04-09/hr=12 [srcpartbucket]
         Reducer 2 
             Execution mode: vectorized, llap
             Needs Tagging: false
@@ -368,6 +368,8 @@ POSTHOOK: Input: default@srcpartbucket@ds=2008-04-08/hr=12
 POSTHOOK: Input: default@srcpartbucket@ds=2008-04-09/hr=11
 POSTHOOK: Input: default@srcpartbucket@ds=2008-04-09/hr=12
 #### A masked pattern was here ####
+2008-04-08	14
+2008-04-09	14
 PREHOOK: query: select ds, count(1) from srcpartbucket tablesample (bucket 1 out of 2 on key) where ds is not null group by ds ORDER BY ds ASC
 PREHOOK: type: QUERY
 PREHOOK: Input: default@srcpartbucket
@@ -384,6 +386,8 @@ POSTHOOK: Input: default@srcpartbucket@ds=2008-04-08/hr=12
 POSTHOOK: Input: default@srcpartbucket@ds=2008-04-09/hr=11
 POSTHOOK: Input: default@srcpartbucket@ds=2008-04-09/hr=12
 #### A masked pattern was here ####
+2008-04-08	4
+2008-04-09	4
 PREHOOK: query: select * from srcpartbucket where ds is not null ORDER BY key ASC, value ASC, ds ASC, hr ASC
 PREHOOK: type: QUERY
 PREHOOK: Input: default@srcpartbucket

http://git-wip-us.apache.org/repos/asf/hive/blob/eaf416ea/ql/src/test/results/clientpositive/llap/sample10_mm.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/sample10_mm.q.out b/ql/src/test/results/clientpositive/llap/sample10_mm.q.out
new file mode 100644
index 0000000..28d0cd0
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/sample10_mm.q.out
@@ -0,0 +1,346 @@
+PREHOOK: query: create table srcpartbucket (key string, value string) partitioned by (ds string, hr string) clustered by (key) into 4 buckets stored as orc tblproperties ("transactional"="true", "transactional_properties"="insert_only")
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@srcpartbucket
+POSTHOOK: query: create table srcpartbucket (key string, value string) partitioned by (ds string, hr string) clustered by (key) into 4 buckets stored as orc tblproperties ("transactional"="true", "transactional_properties"="insert_only")
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@srcpartbucket
+PREHOOK: query: insert overwrite table srcpartbucket partition(ds, hr) select * from srcpart where ds is not null and key < 10
+PREHOOK: type: QUERY
+PREHOOK: Input: default@srcpart
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
+PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
+PREHOOK: Output: default@srcpartbucket
+POSTHOOK: query: insert overwrite table srcpartbucket partition(ds, hr) select * from srcpart where ds is not null and key < 10
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@srcpart
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
+POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
+POSTHOOK: Output: default@srcpartbucket@ds=2008-04-08/hr=11
+POSTHOOK: Output: default@srcpartbucket@ds=2008-04-08/hr=12
+POSTHOOK: Output: default@srcpartbucket@ds=2008-04-09/hr=11
+POSTHOOK: Output: default@srcpartbucket@ds=2008-04-09/hr=12
+POSTHOOK: Lineage: srcpartbucket PARTITION(ds=2008-04-08,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: srcpartbucket PARTITION(ds=2008-04-08,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: srcpartbucket PARTITION(ds=2008-04-08,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: srcpartbucket PARTITION(ds=2008-04-08,hr=12).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: srcpartbucket PARTITION(ds=2008-04-09,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: srcpartbucket PARTITION(ds=2008-04-09,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: srcpartbucket PARTITION(ds=2008-04-09,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: srcpartbucket PARTITION(ds=2008-04-09,hr=12).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: select * from srcpartbucket
+PREHOOK: type: QUERY
+PREHOOK: Input: default@srcpartbucket
+PREHOOK: Input: default@srcpartbucket@ds=2008-04-08/hr=11
+PREHOOK: Input: default@srcpartbucket@ds=2008-04-08/hr=12
+PREHOOK: Input: default@srcpartbucket@ds=2008-04-09/hr=11
+PREHOOK: Input: default@srcpartbucket@ds=2008-04-09/hr=12
+#### A masked pattern was here ####
+POSTHOOK: query: select * from srcpartbucket
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@srcpartbucket
+POSTHOOK: Input: default@srcpartbucket@ds=2008-04-08/hr=11
+POSTHOOK: Input: default@srcpartbucket@ds=2008-04-08/hr=12
+POSTHOOK: Input: default@srcpartbucket@ds=2008-04-09/hr=11
+POSTHOOK: Input: default@srcpartbucket@ds=2008-04-09/hr=12
+#### A masked pattern was here ####
+5	val_5	2008-04-08	11
+2	val_2	2008-04-08	11
+5	val_5	2008-04-08	11
+5	val_5	2008-04-08	11
+0	val_0	2008-04-08	11
+0	val_0	2008-04-08	11
+0	val_0	2008-04-08	11
+9	val_9	2008-04-08	11
+8	val_8	2008-04-08	11
+4	val_4	2008-04-08	11
+5	val_5	2008-04-08	12
+2	val_2	2008-04-08	12
+5	val_5	2008-04-08	12
+5	val_5	2008-04-08	12
+0	val_0	2008-04-08	12
+0	val_0	2008-04-08	12
+0	val_0	2008-04-08	12
+9	val_9	2008-04-08	12
+8	val_8	2008-04-08	12
+4	val_4	2008-04-08	12
+5	val_5	2008-04-09	11
+2	val_2	2008-04-09	11
+5	val_5	2008-04-09	11
+5	val_5	2008-04-09	11
+0	val_0	2008-04-09	11
+0	val_0	2008-04-09	11
+0	val_0	2008-04-09	11
+9	val_9	2008-04-09	11
+8	val_8	2008-04-09	11
+4	val_4	2008-04-09	11
+5	val_5	2008-04-09	12
+2	val_2	2008-04-09	12
+5	val_5	2008-04-09	12
+5	val_5	2008-04-09	12
+0	val_0	2008-04-09	12
+0	val_0	2008-04-09	12
+0	val_0	2008-04-09	12
+9	val_9	2008-04-09	12
+8	val_8	2008-04-09	12
+4	val_4	2008-04-09	12
+PREHOOK: query: explain select key from srcpartbucket tablesample (bucket 2 out of 4 on key)
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select key from srcpartbucket tablesample (bucket 2 out of 4 on key)
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        TableScan
+          alias: srcpartbucket
+          Filter Operator
+            predicate: (((hash(key) & 2147483647) % 4) = 1) (type: boolean)
+            Select Operator
+              expressions: key (type: string)
+              outputColumnNames: _col0
+              ListSink
+
+PREHOOK: query: select key from srcpartbucket tablesample (bucket 1 out of 4 on key)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@srcpartbucket
+PREHOOK: Input: default@srcpartbucket@ds=2008-04-08/hr=11
+PREHOOK: Input: default@srcpartbucket@ds=2008-04-08/hr=12
+PREHOOK: Input: default@srcpartbucket@ds=2008-04-09/hr=11
+PREHOOK: Input: default@srcpartbucket@ds=2008-04-09/hr=12
+#### A masked pattern was here ####
+POSTHOOK: query: select key from srcpartbucket tablesample (bucket 1 out of 4 on key)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@srcpartbucket
+POSTHOOK: Input: default@srcpartbucket@ds=2008-04-08/hr=11
+POSTHOOK: Input: default@srcpartbucket@ds=2008-04-08/hr=12
+POSTHOOK: Input: default@srcpartbucket@ds=2008-04-09/hr=11
+POSTHOOK: Input: default@srcpartbucket@ds=2008-04-09/hr=12
+#### A masked pattern was here ####
+PREHOOK: query: select key from srcpartbucket tablesample (bucket 2 out of 4 on key)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@srcpartbucket
+PREHOOK: Input: default@srcpartbucket@ds=2008-04-08/hr=11
+PREHOOK: Input: default@srcpartbucket@ds=2008-04-08/hr=12
+PREHOOK: Input: default@srcpartbucket@ds=2008-04-09/hr=11
+PREHOOK: Input: default@srcpartbucket@ds=2008-04-09/hr=12
+#### A masked pattern was here ####
+POSTHOOK: query: select key from srcpartbucket tablesample (bucket 2 out of 4 on key)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@srcpartbucket
+POSTHOOK: Input: default@srcpartbucket@ds=2008-04-08/hr=11
+POSTHOOK: Input: default@srcpartbucket@ds=2008-04-08/hr=12
+POSTHOOK: Input: default@srcpartbucket@ds=2008-04-09/hr=11
+POSTHOOK: Input: default@srcpartbucket@ds=2008-04-09/hr=12
+#### A masked pattern was here ####
+5
+2
+5
+5
+0
+0
+0
+5
+2
+5
+5
+0
+0
+0
+5
+2
+5
+5
+0
+0
+0
+5
+2
+5
+5
+0
+0
+0
+PREHOOK: query: select key from srcpartbucket tablesample (bucket 3 out of 4 on key)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@srcpartbucket
+PREHOOK: Input: default@srcpartbucket@ds=2008-04-08/hr=11
+PREHOOK: Input: default@srcpartbucket@ds=2008-04-08/hr=12
+PREHOOK: Input: default@srcpartbucket@ds=2008-04-09/hr=11
+PREHOOK: Input: default@srcpartbucket@ds=2008-04-09/hr=12
+#### A masked pattern was here ####
+POSTHOOK: query: select key from srcpartbucket tablesample (bucket 3 out of 4 on key)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@srcpartbucket
+POSTHOOK: Input: default@srcpartbucket@ds=2008-04-08/hr=11
+POSTHOOK: Input: default@srcpartbucket@ds=2008-04-08/hr=12
+POSTHOOK: Input: default@srcpartbucket@ds=2008-04-09/hr=11
+POSTHOOK: Input: default@srcpartbucket@ds=2008-04-09/hr=12
+#### A masked pattern was here ####
+9
+8
+9
+8
+9
+8
+9
+8
+PREHOOK: query: select key from srcpartbucket tablesample (bucket 4 out of 4 on key)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@srcpartbucket
+PREHOOK: Input: default@srcpartbucket@ds=2008-04-08/hr=11
+PREHOOK: Input: default@srcpartbucket@ds=2008-04-08/hr=12
+PREHOOK: Input: default@srcpartbucket@ds=2008-04-09/hr=11
+PREHOOK: Input: default@srcpartbucket@ds=2008-04-09/hr=12
+#### A masked pattern was here ####
+POSTHOOK: query: select key from srcpartbucket tablesample (bucket 4 out of 4 on key)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@srcpartbucket
+POSTHOOK: Input: default@srcpartbucket@ds=2008-04-08/hr=11
+POSTHOOK: Input: default@srcpartbucket@ds=2008-04-08/hr=12
+POSTHOOK: Input: default@srcpartbucket@ds=2008-04-09/hr=11
+POSTHOOK: Input: default@srcpartbucket@ds=2008-04-09/hr=12
+#### A masked pattern was here ####
+4
+4
+4
+4
+PREHOOK: query: explain
+        select key from srcpartbucket tablesample (bucket 2 out of 4 on key) group by key
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+        select key from srcpartbucket tablesample (bucket 2 out of 4 on key) group by key
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: srcpartbucket
+                  Statistics: Num rows: 40 Data size: 3400 Basic stats: COMPLETE Column stats: COMPLETE
+                  Filter Operator
+                    predicate: (((hash(key) & 2147483647) % 4) = 1) (type: boolean)
+                    Statistics: Num rows: 20 Data size: 1700 Basic stats: COMPLETE Column stats: COMPLETE
+                    Group By Operator
+                      keys: key (type: string)
+                      mode: hash
+                      outputColumnNames: _col0
+                      Statistics: Num rows: 3 Data size: 255 Basic stats: COMPLETE Column stats: COMPLETE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: string)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: string)
+                        Statistics: Num rows: 3 Data size: 255 Basic stats: COMPLETE Column stats: COMPLETE
+            Execution mode: vectorized, llap
+            LLAP IO: may be used (ACID table)
+        Reducer 2 
+            Execution mode: vectorized, llap
+            Reduce Operator Tree:
+              Group By Operator
+                keys: KEY._col0 (type: string)
+                mode: mergepartial
+                outputColumnNames: _col0
+                Statistics: Num rows: 3 Data size: 255 Basic stats: COMPLETE Column stats: COMPLETE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 3 Data size: 255 Basic stats: COMPLETE Column stats: COMPLETE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select key from srcpartbucket tablesample (bucket 1 out of 4 on key) group by key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@srcpartbucket
+PREHOOK: Input: default@srcpartbucket@ds=2008-04-08/hr=11
+PREHOOK: Input: default@srcpartbucket@ds=2008-04-08/hr=12
+PREHOOK: Input: default@srcpartbucket@ds=2008-04-09/hr=11
+PREHOOK: Input: default@srcpartbucket@ds=2008-04-09/hr=12
+#### A masked pattern was here ####
+POSTHOOK: query: select key from srcpartbucket tablesample (bucket 1 out of 4 on key) group by key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@srcpartbucket
+POSTHOOK: Input: default@srcpartbucket@ds=2008-04-08/hr=11
+POSTHOOK: Input: default@srcpartbucket@ds=2008-04-08/hr=12
+POSTHOOK: Input: default@srcpartbucket@ds=2008-04-09/hr=11
+POSTHOOK: Input: default@srcpartbucket@ds=2008-04-09/hr=12
+#### A masked pattern was here ####
+PREHOOK: query: select key from srcpartbucket tablesample (bucket 2 out of 4 on key) group by key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@srcpartbucket
+PREHOOK: Input: default@srcpartbucket@ds=2008-04-08/hr=11
+PREHOOK: Input: default@srcpartbucket@ds=2008-04-08/hr=12
+PREHOOK: Input: default@srcpartbucket@ds=2008-04-09/hr=11
+PREHOOK: Input: default@srcpartbucket@ds=2008-04-09/hr=12
+#### A masked pattern was here ####
+POSTHOOK: query: select key from srcpartbucket tablesample (bucket 2 out of 4 on key) group by key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@srcpartbucket
+POSTHOOK: Input: default@srcpartbucket@ds=2008-04-08/hr=11
+POSTHOOK: Input: default@srcpartbucket@ds=2008-04-08/hr=12
+POSTHOOK: Input: default@srcpartbucket@ds=2008-04-09/hr=11
+POSTHOOK: Input: default@srcpartbucket@ds=2008-04-09/hr=12
+#### A masked pattern was here ####
+5
+0
+2
+PREHOOK: query: select key from srcpartbucket tablesample (bucket 3 out of 4 on key) group by key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@srcpartbucket
+PREHOOK: Input: default@srcpartbucket@ds=2008-04-08/hr=11
+PREHOOK: Input: default@srcpartbucket@ds=2008-04-08/hr=12
+PREHOOK: Input: default@srcpartbucket@ds=2008-04-09/hr=11
+PREHOOK: Input: default@srcpartbucket@ds=2008-04-09/hr=12
+#### A masked pattern was here ####
+POSTHOOK: query: select key from srcpartbucket tablesample (bucket 3 out of 4 on key) group by key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@srcpartbucket
+POSTHOOK: Input: default@srcpartbucket@ds=2008-04-08/hr=11
+POSTHOOK: Input: default@srcpartbucket@ds=2008-04-08/hr=12
+POSTHOOK: Input: default@srcpartbucket@ds=2008-04-09/hr=11
+POSTHOOK: Input: default@srcpartbucket@ds=2008-04-09/hr=12
+#### A masked pattern was here ####
+9
+8
+PREHOOK: query: select key from srcpartbucket tablesample (bucket 4 out of 4 on key) group by key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@srcpartbucket
+PREHOOK: Input: default@srcpartbucket@ds=2008-04-08/hr=11
+PREHOOK: Input: default@srcpartbucket@ds=2008-04-08/hr=12
+PREHOOK: Input: default@srcpartbucket@ds=2008-04-09/hr=11
+PREHOOK: Input: default@srcpartbucket@ds=2008-04-09/hr=12
+#### A masked pattern was here ####
+POSTHOOK: query: select key from srcpartbucket tablesample (bucket 4 out of 4 on key) group by key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@srcpartbucket
+POSTHOOK: Input: default@srcpartbucket@ds=2008-04-08/hr=11
+POSTHOOK: Input: default@srcpartbucket@ds=2008-04-08/hr=12
+POSTHOOK: Input: default@srcpartbucket@ds=2008-04-09/hr=11
+POSTHOOK: Input: default@srcpartbucket@ds=2008-04-09/hr=12
+#### A masked pattern was here ####
+4

http://git-wip-us.apache.org/repos/asf/hive/blob/eaf416ea/ql/src/test/results/clientpositive/masking_5.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/masking_5.q.out b/ql/src/test/results/clientpositive/masking_5.q.out
index 498fc11..420a680 100644
--- a/ql/src/test/results/clientpositive/masking_5.q.out
+++ b/ql/src/test/results/clientpositive/masking_5.q.out
@@ -788,3 +788,127 @@ POSTHOOK: Input: default@masking_test_n6
 484	val_484
 98	val_98
 86	val_86
+327	val_327
+437	val_437
+97	val_97
+435	val_435
+169	val_169
+19	val_19
+307	val_307
+255	val_255
+169	val_169
+307	val_307
+419	val_419
+399	val_399
+153	val_153
+287	val_287
+315	val_315
+97	val_97
+427	val_427
+369	val_369
+341	val_341
+77	val_77
+85	val_85
+169	val_169
+409	val_409
+369	val_369
+67	val_67
+137	val_137
+409	val_409
+407	val_407
+421	val_421
+133	val_133
+187	val_187
+233	val_233
+197	val_197
+221	val_221
+417	val_417
+353	val_353
+83	val_83
+249	val_249
+157	val_157
+317	val_317
+165	val_165
+325	val_325
+443	val_443
+169	val_169
+199	val_199
+417	val_417
+479	val_479
+43	val_43
+237	val_237
+491	val_491
+51	val_51
+119	val_119
+149	val_149
+163	val_163
+255	val_255
+351	val_351
+327	val_327
+291	val_291
+165	val_165
+397	val_397
+57	val_57
+187	val_187
+191	val_191
+199	val_199
+311	val_311
+201	val_201
+393	val_393
+197	val_197
+339	val_339
+119	val_119
+113	val_113
+17	val_17
+409	val_409
+105	val_105
+483	val_483
+463	val_463
+195	val_195
+325	val_325
+463	val_463
+229	val_229
+411	val_411
+327	val_327
+467	val_467
+365	val_365
+191	val_191
+397	val_397
+377	val_377
+353	val_353
+485	val_485
+239	val_239
+495	val_495
+113	val_113
+417	val_417
+67	val_67
+181	val_181
+69	val_69
+399	val_399
+83	val_83
+453	val_453
+137	val_137
+449	val_449
+149	val_149
+311	val_311
+41	val_41
+65	val_65
+119	val_119
+221	val_221
+289	val_289
+195	val_195
+199	val_199
+233	val_233
+229	val_229
+239	val_239
+27	val_27
+317	val_317
+51	val_51
+459	val_459
+497	val_497
+311	val_311
+177	val_177
+237	val_237
+187	val_187
+459	val_459
+369	val_369

http://git-wip-us.apache.org/repos/asf/hive/blob/eaf416ea/ql/src/test/results/clientpositive/sample6.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/sample6.q.out b/ql/src/test/results/clientpositive/sample6.q.out
index 7f853e5..a6a6f2c 100644
--- a/ql/src/test/results/clientpositive/sample6.q.out
+++ b/ql/src/test/results/clientpositive/sample6.q.out
@@ -95,7 +95,7 @@ STAGE PLANS:
       Path -> Partition:
 #### A masked pattern was here ####
           Partition
-            base file name: 000000_0
+            base file name: srcbucket
             input format: org.apache.hadoop.mapred.TextInputFormat
             output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
             properties:
@@ -144,7 +144,7 @@ STAGE PLANS:
               name: default.srcbucket
             name: default.srcbucket
       Truncated Path -> Alias:
-        /srcbucket/000000_0 [s]
+        /srcbucket [s]
       Needs Tagging: false
       Reduce Operator Tree:
         Group By Operator
@@ -437,55 +437,125 @@ POSTHOOK: Input: default@dest1_n27
 6	val_7
 10	val_10
 10	val_11
+17	val_17
+21	val_22
+21	val_22
+21	val_22
+21	val_22
+27	val_27
 30	val_30
 30	val_31
+31	val_32
 40	val_41
 40	val_41
+51	val_51
+51	val_51
+51	val_52
+57	val_57
 58	val_58
 58	val_58
 58	val_59
 58	val_59
 64	val_64
+65	val_65
+65	val_66
+65	val_66
 70	val_70
 70	val_70
 70	val_70
 70	val_71
 80	val_80
 80	val_81
+83	val_83
+83	val_83
 86	val_86
 86	val_87
 90	val_90
 90	val_90
 90	val_90
+91	val_92
 98	val_98
 98	val_98
+105	val_105
+105	val_106
+105	val_106
 110	val_111
+113	val_113
+113	val_113
 116	val_116
 116	val_117
+117	val_118
+117	val_118
+119	val_119
+119	val_119
+119	val_119
+119	val_120
+119	val_120
+119	val_120
+121	val_122
+121	val_122
+123	val_124
+123	val_124
 126	val_126
 126	val_127
 126	val_127
 134	val_134
 134	val_134
 134	val_135
+137	val_137
+137	val_137
+137	val_138
+153	val_153
+153	val_154
+153	val_154
 156	val_156
 156	val_157
 156	val_157
+157	val_157
+157	val_158
+157	val_158
 158	val_158
+163	val_163
+169	val_169
+169	val_169
+169	val_169
+169	val_169
+177	val_177
+177	val_178
+177	val_178
 178	val_178
 178	val_179
 178	val_179
 184	val_185
+187	val_187
+187	val_187
+187	val_187
+195	val_195
+195	val_195
+197	val_197
+197	val_197
+197	val_198
 206	val_207
 206	val_207
 206	val_207
 208	val_208
 208	val_208
 208	val_208
+221	val_221
+221	val_221
+229	val_229
+229	val_229
+237	val_237
+237	val_237
+243	val_244
+243	val_244
 244	val_244
 244	val_245
 244	val_245
 244	val_245
+249	val_249
+249	val_250
+249	val_250
 252	val_252
 252	val_253
 254	val_255
@@ -493,27 +563,40 @@ POSTHOOK: Input: default@dest1_n27
 256	val_256
 256	val_257
 266	val_266
+271	val_272
 272	val_272
 272	val_272
 272	val_273
 286	val_286
 286	val_287
+289	val_289
+289	val_290
 292	val_292
 292	val_293
 292	val_293
 304	val_305
+307	val_307
+307	val_307
 308	val_308
 308	val_309
 308	val_309
+315	val_315
 316	val_316
 316	val_316
 316	val_316
+317	val_317
+317	val_317
+317	val_318
 326	val_327
+327	val_327
+327	val_327
+327	val_327
 334	val_335
 336	val_336
 336	val_337
 338	val_338
 338	val_339
+339	val_339
 342	val_342
 342	val_342
 342	val_343
@@ -526,21 +609,45 @@ POSTHOOK: Input: default@dest1_n27
 348	val_348
 348	val_348
 348	val_349
+349	val_350
+349	val_350
+349	val_350
+349	val_350
 352	val_353
 352	val_353
+353	val_353
+353	val_353
+353	val_354
+355	val_356
+355	val_356
 360	val_360
 360	val_361
 362	val_362
 364	val_364
 364	val_365
+369	val_369
+369	val_369
+369	val_369
+369	val_370
+371	val_372
+371	val_372
+371	val_372
+371	val_372
+377	val_377
 378	val_378
 378	val_379
+391	val_392
+391	val_392
 392	val_392
 392	val_393
 392	val_393
 396	val_396
 396	val_396
 396	val_396
+399	val_399
+399	val_399
+399	val_400
+399	val_400
 402	val_402
 402	val_403
 402	val_403
@@ -550,15 +657,31 @@ POSTHOOK: Input: default@dest1_n27
 404	val_405
 404	val_405
 404	val_405
+407	val_407
+407	val_408
+407	val_408
+407	val_408
 408	val_409
 408	val_409
 410	val_411
+417	val_417
+417	val_417
+417	val_417
+419	val_419
+423	val_424
 426	val_427
+427	val_427
+427	val_428
+427	val_428
 440	val_441
 440	val_441
+449	val_449
 452	val_452
 458	val_458
 458	val_458
+463	val_463
+463	val_463
+463	val_464
 466	val_466
 466	val_466
 466	val_466
@@ -569,10 +692,14 @@ POSTHOOK: Input: default@dest1_n27
 478	val_478
 478	val_479
 478	val_479
+479	val_479
 482	val_482
 482	val_483
 484	val_484
 484	val_485
+497	val_497
+497	val_498
+497	val_498
 PREHOOK: query: EXPLAIN EXTENDED SELECT s.* FROM srcbucket TABLESAMPLE (BUCKET 4 OUT OF 4 on key) s
 ORDER BY key, value
 PREHOOK: type: QUERY
@@ -613,7 +740,7 @@ STAGE PLANS:
       Path -> Partition:
 #### A masked pattern was here ####
           Partition
-            base file name: 000001_0
+            base file name: srcbucket
             input format: org.apache.hadoop.mapred.TextInputFormat
             output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
             properties:
@@ -662,7 +789,7 @@ STAGE PLANS:
               name: default.srcbucket
             name: default.srcbucket
       Truncated Path -> Alias:
-        /srcbucket/000001_0 [s]
+        /srcbucket [s]
       Needs Tagging: false
       Reduce Operator Tree:
         Select Operator
@@ -712,11 +839,20 @@ POSTHOOK: Input: default@srcbucket
 5	val_5
 5	val_5
 5	val_6
+12	val_12
+12	val_12
+12	val_13
 15	val_15
 15	val_15
 15	val_16
 15	val_16
+16	val_17
+16	val_17
+22	val_23
 23	val_24
+24	val_24
+24	val_24
+28	val_28
 33	val_33
 33	val_34
 35	val_35
@@ -725,20 +861,47 @@ POSTHOOK: Input: default@srcbucket
 35	val_36
 35	val_36
 35	val_36
+42	val_42
+42	val_42
+42	val_43
+42	val_43
+42	val_43
+44	val_44
 47	val_47
 47	val_48
 49	val_50
 49	val_50
+50	val_51
+52	val_53
+52	val_53
+52	val_53
+52	val_53
 53	val_53
 53	val_54
+56	val_57
 63	val_64
 75	val_76
+76	val_76
+76	val_76
+76	val_77
+76	val_77
+76	val_77
 87	val_87
 87	val_88
 87	val_88
+94	val_95
 95	val_95
 95	val_95
+104	val_104
+104	val_104
+104	val_105
+104	val_105
+104	val_105
 111	val_111
+114	val_114
+114	val_115
+114	val_115
+114	val_115
 125	val_125
 125	val_125
 125	val_126
@@ -746,6 +909,12 @@ POSTHOOK: Input: default@srcbucket
 129	val_129
 129	val_130
 129	val_130
+138	val_138
+138	val_138
+138	val_138
+138	val_138
+138	val_139
+138	val_139
 145	val_145
 147	val_148
 147	val_148
@@ -755,13 +924,28 @@ POSTHOOK: Input: default@srcbucket
 161	val_162
 161	val_162
 161	val_162
+166	val_166
 167	val_167
 167	val_167
 167	val_167
 167	val_168
+168	val_168
+168	val_169
+170	val_170
+170	val_171
+172	val_172
+172	val_172
+172	val_173
+180	val_180
+192	val_192
+192	val_193
 193	val_193
 193	val_193
 193	val_193
+196	val_196
+196	val_197
+196	val_197
+196	val_197
 203	val_203
 203	val_203
 207	val_207
@@ -775,17 +959,25 @@ POSTHOOK: Input: default@srcbucket
 213	val_214
 219	val_219
 219	val_219
+224	val_224
+224	val_224
+224	val_225
 227	val_228
 241	val_241
 241	val_242
 241	val_242
 241	val_242
 241	val_242
+242	val_242
+242	val_242
+242	val_243
 245	val_246
 245	val_246
 257	val_257
 257	val_258
 257	val_258
+258	val_258
+258	val_259
 259	val_260
 259	val_260
 263	val_263
@@ -793,10 +985,16 @@ POSTHOOK: Input: default@srcbucket
 265	val_265
 265	val_266
 267	val_268
+268	val_269
 273	val_273
 273	val_273
 273	val_273
 273	val_274
+274	val_274
+274	val_275
+278	val_278
+278	val_278
+278	val_279
 281	val_281
 281	val_281
 281	val_282
@@ -805,8 +1003,18 @@ POSTHOOK: Input: default@srcbucket
 283	val_283
 293	val_294
 293	val_294
+300	val_301
+300	val_301
 303	val_304
 303	val_304
+318	val_318
+318	val_318
+318	val_318
+318	val_319
+322	val_322
+322	val_322
+322	val_323
+330	val_331
 331	val_331
 331	val_331
 331	val_332
@@ -814,6 +1022,10 @@ POSTHOOK: Input: default@srcbucket
 335	val_335
 335	val_336
 335	val_336
+356	val_356
+356	val_357
+356	val_357
+358	val_359
 367	val_367
 367	val_367
 367	val_368
@@ -823,8 +1035,15 @@ POSTHOOK: Input: default@srcbucket
 379	val_379
 379	val_380
 381	val_382
+382	val_382
+382	val_382
+382	val_383
+382	val_383
 385	val_386
 385	val_386
+390	val_391
+390	val_391
+390	val_391
 395	val_395
 395	val_395
 395	val_396
@@ -839,7 +1058,16 @@ POSTHOOK: Input: default@srcbucket
 403	val_403
 403	val_403
 405	val_406
+406	val_406
+406	val_406
+406	val_406
+406	val_406
+406	val_407
+412	val_413
+412	val_413
 415	val_416
+416	val_417
+418	val_418
 429	val_429
 429	val_429
 429	val_430
@@ -848,8 +1076,21 @@ POSTHOOK: Input: default@srcbucket
 431	val_431
 431	val_431
 431	val_432
+436	val_436
+436	val_437
 441	val_442
 447	val_448
+448	val_448
+448	val_449
+468	val_468
+468	val_468
+468	val_468
+468	val_468
+468	val_469
+468	val_469
+468	val_469
+470	val_470
+470	val_471
 475	val_475
 475	val_476
 481	val_481
@@ -859,6 +1100,13 @@ POSTHOOK: Input: default@srcbucket
 489	val_489
 489	val_489
 489	val_490
+492	val_492
+492	val_492
+492	val_493
+492	val_493
+494	val_494
+494	val_495
+494	val_495
 PREHOOK: query: EXPLAIN EXTENDED SELECT s.* FROM srcbucket TABLESAMPLE (BUCKET 1 OUT OF 2 on key) s
 ORDER BY key, value
 PREHOOK: type: QUERY
@@ -899,7 +1147,7 @@ STAGE PLANS:
       Path -> Partition:
 #### A masked pattern was here ####
           Partition
-            base file name: 000000_0
+            base file name: srcbucket
             input format: org.apache.hadoop.mapred.TextInputFormat
             output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
             properties:
@@ -948,7 +1196,7 @@ STAGE PLANS:
               name: default.srcbucket
             name: default.srcbucket
       Truncated Path -> Alias:
-        /srcbucket/000000_0 [s]
+        /srcbucket [s]
       Needs Tagging: false
       Reduce Operator Tree:
         Select Operator
@@ -996,75 +1244,185 @@ POSTHOOK: Input: default@srcbucket
 #### A masked pattern was here ####
 2	val_2
 2	val_3
+3	val_4
 6	val_7
 6	val_7
 10	val_10
 10	val_11
+17	val_17
+19	val_19
+19	val_20
 20	val_20
 20	val_21
 20	val_21
+21	val_22
+21	val_22
+21	val_22
+21	val_22
+27	val_27
+29	val_30
+29	val_30
 30	val_30
 30	val_31
+31	val_32
 40	val_41
 40	val_41
+41	val_41
+43	val_43
 46	val_47
 48	val_49
 48	val_49
+51	val_51
+51	val_51
+51	val_52
 54	val_54
+57	val_57
 58	val_58
 58	val_58
 58	val_59
 58	val_59
+59	val_60
 60	val_61
+61	val_62
 64	val_64
+65	val_65
+65	val_66
+65	val_66
+67	val_67
+67	val_67
 68	val_69
+69	val_69
+69	val_70
 70	val_70
 70	val_70
 70	val_70
 70	val_71
+77	val_77
+77	val_78
+77	val_78
 80	val_80
 80	val_81
+83	val_83
+83	val_83
 84	val_84
 84	val_84
+85	val_85
+85	val_86
 86	val_86
 86	val_87
+89	val_90
+89	val_90
+89	val_90
 90	val_90
 90	val_90
 90	val_90
+91	val_92
+93	val_94
+93	val_94
+93	val_94
 96	val_96
+97	val_97
+97	val_97
+97	val_98
+97	val_98
 98	val_98
 98	val_98
+99	val_100
+101	val_102
+105	val_105
+105	val_106
+105	val_106
 106	val_107
 110	val_111
+113	val_113
+113	val_113
 116	val_116
 116	val_117
+117	val_118
+117	val_118
+119	val_119
+119	val_119
+119	val_119
+119	val_120
+119	val_120
+119	val_120
+121	val_122
+121	val_122
+123	val_124
+123	val_124
 126	val_126
 126	val_127
 126	val_127
 132	val_133
 132	val_133
+133	val_133
+133	val_134
 134	val_134
 134	val_134
 134	val_135
+135	val_136
+135	val_136
+135	val_136
+137	val_137
+137	val_137
+137	val_138
 140	val_141
 146	val_146
 146	val_146
+149	val_149
+149	val_149
+149	val_150
+153	val_153
+153	val_154
+153	val_154
 156	val_156
 156	val_157
 156	val_157
+157	val_157
+157	val_158
+157	val_158
 158	val_158
 162	val_162
 162	val_163
+163	val_163
 164	val_164
 164	val_164
 164	val_165
 164	val_165
+165	val_165
+165	val_165
+165	val_166
+169	val_169
+169	val_169
+169	val_169
+169	val_169
+177	val_177
+177	val_178
+177	val_178
 178	val_178
 178	val_179
 178	val_179
+181	val_181
 182	val_183
 184	val_185
+185	val_186
+187	val_187
+187	val_187
+187	val_187
 190	val_190
+191	val_191
+191	val_191
+191	val_192
+195	val_195
+195	val_195
+197	val_197
+197	val_197
+197	val_198
+199	val_199
+199	val_199
+199	val_199
+199	val_200
+201	val_201
 202	val_202
 206	val_207
 206	val_207
@@ -1074,34 +1432,57 @@ POSTHOOK: Input: default@srcbucket
 208	val_208
 212	val_213
 214	val_214
+215	val_216
 216	val_216
 216	val_216
 216	val_217
+221	val_221
+221	val_221
 226	val_226
 226	val_227
 226	val_227
 226	val_227
 226	val_227
+229	val_229
+229	val_229
+231	val_232
+233	val_233
+233	val_233
+237	val_237
+237	val_237
 238	val_238
 238	val_238
 238	val_239
+239	val_239
+239	val_239
+239	val_240
+239	val_240
 240	val_241
+243	val_244
+243	val_244
 244	val_244
 244	val_245
 244	val_245
 244	val_245
 248	val_248
 248	val_249
+249	val_249
+249	val_250
+249	val_250
 252	val_252
 252	val_253
 254	val_255
+255	val_255
+255	val_255
 256	val_256
 256	val_256
 256	val_257
 260	val_260
 260	val_261
 260	val_261
+261	val_262
 266	val_266
+271	val_272
 272	val_272
 272	val_272
 272	val_273
@@ -1111,10 +1492,20 @@ POSTHOOK: Input: default@srcbucket
 284	val_285
 286	val_286
 286	val_287
+287	val_287
+287	val_288
+287	val_288
+289	val_289
+289	val_290
+291	val_291
+291	val_292
+291	val_292
 292	val_292
 292	val_293
 292	val_293
 304	val_305
+307	val_307
+307	val_307
 308	val_308
 308	val_309
 308	val_309
@@ -1122,37 +1513,81 @@ POSTHOOK: Input: default@srcbucket
 310	val_311
 310	val_311
 310	val_311
+311	val_311
+311	val_311
+311	val_311
+313	val_314
+315	val_315
 316	val_316
 316	val_316
 316	val_316
+317	val_317
+317	val_317
+317	val_318
 324	val_325
+325	val_325
+325	val_325
 326	val_327
+327	val_327
+327	val_327
+327	val_327
 332	val_332
 334	val_335
 336	val_336
 336	val_337
+337	val_338
 338	val_338
 338	val_339
+339	val_339
+341	val_341
+341	val_342
+341	val_342
+341	val_342
 342	val_342
 342	val_342
 342	val_343
+343	val_344
 344	val_344
 344	val_344
 344	val_345
+347	val_348
+347	val_348
 348	val_348
 348	val_348
 348	val_348
 348	val_348
 348	val_348
 348	val_349
+349	val_350
+349	val_350
+349	val_350
+349	val_350
+351	val_351
+351	val_352
+351	val_352
 352	val_353
 352	val_353
+353	val_353
+353	val_353
+353	val_354
+355	val_356
+355	val_356
 360	val_360
 360	val_361
 362	val_362
 364	val_364
 364	val_365
+365	val_365
 368	val_368
+369	val_369
+369	val_369
+369	val_369
+369	val_370
+371	val_372
+371	val_372
+371	val_372
+371	val_372
+377	val_377
 378	val_378
 378	val_379
 384	val_384
@@ -1165,13 +1600,24 @@ POSTHOOK: Input: default@srcbucket
 386	val_387
 386	val_387
 388	val_389
+391	val_392
+391	val_392
 392	val_392
 392	val_393
 392	val_393
+393	val_393
+393	val_394
+393	val_394
 394	val_394
 396	val_396
 396	val_396
 396	val_396
+397	val_397
+397	val_397
+399	val_399
+399	val_399
+399	val_400
+399	val_400
 402	val_402
 402	val_403
 402	val_403
@@ -1181,13 +1627,36 @@ POSTHOOK: Input: default@srcbucket
 404	val_405
 404	val_405
 404	val_405
+407	val_407
+407	val_408
+407	val_408
+407	val_408
 408	val_409
 408	val_409
+409	val_409
+409	val_409
+409	val_409
+409	val_410
+409	val_410
 410	val_411
+411	val_411
+411	val_412
 414	val_414
 414	val_414
 414	val_415
+417	val_417
+417	val_417
+417	val_417
+419	val_419
+421	val_421
+421	val_422
+421	val_422
+423	val_424
+425	val_426
 426	val_427
+427	val_427
+427	val_428
+427	val_428
 428	val_429
 430	val_430
 430	val_430
@@ -1195,13 +1664,24 @@ POSTHOOK: Input: default@srcbucket
 430	val_431
 432	val_432
 432	val_433
+435	val_435
+435	val_436
+437	val_437
+437	val_438
 440	val_441
 440	val_441
+443	val_443
+443	val_444
+443	val_444
+443	val_444
 444	val_444
 446	val_446
 446	val_447
 446	val_447
+449	val_449
 452	val_452
+453	val_453
+453	val_454
 454	val_454
 454	val_454
 454	val_454
@@ -1209,10 +1689,19 @@ POSTHOOK: Input: default@srcbucket
 454	val_455
 458	val_458
 458	val_458
+459	val_459
+459	val_459
+459	val_460
+463	val_463
+463	val_463
+463	val_464
 466	val_466
 466	val_466
 466	val_466
+467	val_467
+467	val_468
 472	val_472
+473	val_474
 474	val_475
 474	val_475
 476	val_477
@@ -1221,6 +1710,7 @@ POSTHOOK: Input: default@srcbucket
 478	val_478
 478	val_479
 478	val_479
+479	val_479
 480	val_480
 480	val_480
 480	val_480
@@ -1228,11 +1718,23 @@ POSTHOOK: Input: default@srcbucket
 480	val_481
 482	val_482
 482	val_483
+483	val_483
 484	val_484
 484	val_485
+485	val_485
+485	val_486
+485	val_486
 488	val_489
 490	val_490
 490	val_491
+491	val_491
+491	val_492
+491	val_492
+495	val_495
+495	val_496
+497	val_497
+497	val_498
+497	val_498
 498	val_498
 498	val_498
 498	val_498
@@ -2239,57 +2741,7 @@ STAGE PLANS:
       Path -> Partition:
 #### A masked pattern was here ####
           Partition
-            base file name: 000000_0
-            input format: org.apache.hadoop.mapred.TextInputFormat
-            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-            properties:
-              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
-              bucket_count 4
-              bucket_field_name key
-              bucketing_version 2
-              column.name.delimiter ,
-              columns key,value
-              columns.comments 
-              columns.types int:string
-#### A masked pattern was here ####
-              name default.srcbucket2
-              numFiles 4
-              numRows 500
-              rawDataSize 5312
-              serialization.ddl struct srcbucket2 { i32 key, string value}
-              serialization.format 1
-              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              totalSize 5812
-#### A masked pattern was here ####
-            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-          
-              input format: org.apache.hadoop.mapred.TextInputFormat
-              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-              properties:
-                COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
-                bucket_count 4
-                bucket_field_name key
-                bucketing_version 2
-                column.name.delimiter ,
-                columns key,value
-                columns.comments 
-                columns.types int:string
-#### A masked pattern was here ####
-                name default.srcbucket2
-                numFiles 4
-                numRows 500
-                rawDataSize 5312
-                serialization.ddl struct srcbucket2 { i32 key, string value}
-                serialization.format 1
-                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                totalSize 5812
-#### A masked pattern was here ####
-              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              name: default.srcbucket2
-            name: default.srcbucket2
-#### A masked pattern was here ####
-          Partition
-            base file name: 000002_0
+            base file name: srcbucket2
             input format: org.apache.hadoop.mapred.TextInputFormat
             output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
             properties:
@@ -2338,8 +2790,7 @@ STAGE PLANS:
               name: default.srcbucket2
             name: default.srcbucket2
       Truncated Path -> Alias:
-        /srcbucket2/000000_0 [s]
-        /srcbucket2/000002_0 [s]
+        /srcbucket2 [s]
       Needs Tagging: false
       Reduce Operator Tree:
         Select Operator
@@ -2387,38 +2838,91 @@ POSTHOOK: Input: default@srcbucket2
 #### A masked pattern was here ####
 2	val_2
 10	val_10
+17	val_17
+19	val_19
 20	val_20
+27	val_27
 30	val_30
+41	val_41
+43	val_43
+51	val_51
+51	val_51
 54	val_54
+57	val_57
 58	val_58
 58	val_58
 64	val_64
+65	val_65
+67	val_67
+67	val_67
+69	val_69
 70	val_70
 70	val_70
 70	val_70
+77	val_77
 80	val_80
+83	val_83
+83	val_83
 84	val_84
 84	val_84
+85	val_85
 86	val_86
 90	val_90
 90	val_90
 90	val_90
 96	val_96
+97	val_97
+97	val_97
 98	val_98
 98	val_98
+105	val_105
+113	val_113
+113	val_113
 116	val_116
+119	val_119
+119	val_119
+119	val_119
 126	val_126
+133	val_133
 134	val_134
 134	val_134
+137	val_137
+137	val_137
 146	val_146
 146	val_146
+149	val_149
+149	val_149
+153	val_153
 156	val_156
+157	val_157
 158	val_158
 162	val_162
+163	val_163
 164	val_164
 164	val_164
+165	val_165
+165	val_165
+169	val_169
+169	val_169
+169	val_169
+169	val_169
+177	val_177
 178	val_178
+181	val_181
+187	val_187
+187	val_187
+187	val_187
 190	val_190
+191	val_191
+191	val_191
+195	val_195
+195	val_195
+197	val_197
+197	val_197
+199	val_199
+199	val_199
+199	val_199
+201	val_201
 202	val_202
 208	val_208
 208	val_208
@@ -2426,12 +2930,25 @@ POSTHOOK: Input: default@srcbucket2
 214	val_214
 216	val_216
 216	val_216
+221	val_221
+221	val_221
 226	val_226
+229	val_229
+229	val_229
+233	val_233
+233	val_233
+237	val_237
+237	val_237
 238	val_238
 238	val_238
+239	val_239
+239	val_239
 244	val_244
 248	val_248
+249	val_249
 252	val_252
+255	val_255
+255	val_255
 256	val_256
 256	val_256
 260	val_260
@@ -2440,15 +2957,33 @@ POSTHOOK: Input: default@srcbucket2
 272	val_272
 284	val_284
 286	val_286
+287	val_287
+289	val_289
+291	val_291
 292	val_292
+307	val_307
+307	val_307
 308	val_308
 310	val_310
+311	val_311
+311	val_311
+311	val_311
+315	val_315
 316	val_316
 316	val_316
 316	val_316
+317	val_317
+317	val_317
+325	val_325
+325	val_325
+327	val_327
+327	val_327
+327	val_327
 332	val_332
 336	val_336
 338	val_338
+339	val_339
+341	val_341
 342	val_342
 342	val_342
 344	val_344
@@ -2458,49 +2993,89 @@ POSTHOOK: Input: default@srcbucket2
 348	val_348
 348	val_348
 348	val_348
+351	val_351
+353	val_353
+353	val_353
 360	val_360
 362	val_362
 364	val_364
+365	val_365
 368	val_368
+369	val_369
+369	val_369
+369	val_369
+377	val_377
 378	val_378
 384	val_384
 384	val_384
 384	val_384
 386	val_386
 392	val_392
+393	val_393
 394	val_394
 396	val_396
 396	val_396
 396	val_396
+397	val_397
+397	val_397
+399	val_399
+399	val_399
 402	val_402
 404	val_404
 404	val_404
+407	val_407
+409	val_409
+409	val_409
+409	val_409
+411	val_411
 414	val_414
 414	val_414
+417	val_417
+417	val_417
+417	val_417
+419	val_419
+421	val_421
+427	val_427
 430	val_430
 430	val_430
 430	val_430
 432	val_432
+435	val_435
+437	val_437
+443	val_443
 444	val_444
 446	val_446
+449	val_449
 452	val_452
+453	val_453
 454	val_454
 454	val_454
 454	val_454
 458	val_458
 458	val_458
+459	val_459
+459	val_459
+463	val_463
+463	val_463
 466	val_466
 466	val_466
 466	val_466
+467	val_467
 472	val_472
 478	val_478
 478	val_478
+479	val_479
 480	val_480
 480	val_480
 480	val_480
 482	val_482
+483	val_483
 484	val_484
+485	val_485
 490	val_490
+491	val_491
+495	val_495
+497	val_497
 498	val_498
 498	val_498
 498	val_498
@@ -2544,7 +3119,7 @@ STAGE PLANS:
       Path -> Partition:
 #### A masked pattern was here ####
           Partition
-            base file name: 000001_0
+            base file name: srcbucket2
             input format: org.apache.hadoop.mapred.TextInputFormat
             output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
             properties:
@@ -2593,7 +3168,7 @@ STAGE PLANS:
               name: default.srcbucket2
             name: default.srcbucket2
       Truncated Path -> Alias:
-        /srcbucket2/000001_0 [s]
+        /srcbucket2 [s]
       Needs Tagging: false
       Reduce Operator Tree:
         Select Operator
@@ -2639,38 +3214,132 @@ ORDER BY key, value
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@srcbucket2
 #### A masked pattern was here ####
+0	val_0
+0	val_0
+0	val_0
+4	val_4
+8	val_8
 9	val_9
+11	val_11
+18	val_18
+18	val_18
+26	val_26
+26	val_26
+34	val_34
 37	val_37
 37	val_37
+66	val_66
+72	val_72
+72	val_72
+74	val_74
+78	val_78
+82	val_82
+92	val_92
+100	val_100
+100	val_100
+103	val_103
+103	val_103
+118	val_118
+118	val_118
+120	val_120
+120	val_120
+128	val_128
+128	val_128
+128	val_128
+131	val_131
+136	val_136
+143	val_143
+150	val_150
+152	val_152
+152	val_152
+155	val_155
+160	val_160
+174	val_174
+174	val_174
+175	val_175
+175	val_175
+176	val_176
+176	val_176
+179	val_179
+179	val_179
+183	val_183
+186	val_186
 189	val_189
+194	val_194
+200	val_200
+200	val_200
 205	val_205
 205	val_205
 217	val_217
 217	val_217
+218	val_218
+222	val_222
+223	val_223
+223	val_223
+228	val_228
+230	val_230
+230	val_230
+230	val_230
+230	val_230
+230	val_230
+235	val_235
+247	val_247
+262	val_262
+275	val_275
 277	val_277
 277	val_277
 277	val_277
 277	val_277
+280	val_280
+280	val_280
+282	val_282
+282	val_282
 285	val_285
+288	val_288
+288	val_288
+296	val_296
+298	val_298
+298	val_298
+298	val_298
+302	val_302
 305	val_305
+306	val_306
 309	val_309
 309	val_309
 321	val_321
 321	val_321
+323	val_323
 333	val_333
 333	val_333
 345	val_345
+366	val_366
+374	val_374
+375	val_375
 389	val_389
+400	val_400
 413	val_413
 413	val_413
+424	val_424
+424	val_424
+438	val_438
+438	val_438
+438	val_438
+439	val_439
+439	val_439
+455	val_455
 457	val_457
+460	val_460
+462	val_462
+462	val_462
 469	val_469
 469	val_469
 469	val_469
 469	val_469
 469	val_469
 477	val_477
+487	val_487
 493	val_493
+496	val_496
 PREHOOK: query: CREATE TABLE empty_bucket (key int, value string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
@@ -2714,6 +3383,61 @@ STAGE PLANS:
                   tag: -1
                   auto parallelism: false
       Execution mode: vectorized
+      Path -> Alias:
+#### A masked pattern was here ####
+      Path -> Partition:
+#### A masked pattern was here ####
+          Partition
+            base file name: empty_bucket
+            input format: org.apache.hadoop.mapred.TextInputFormat
+            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+            properties:
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
+              bucket_count 2
+              bucket_field_name key
+              bucketing_version 2
+              column.name.delimiter ,
+              columns key,value
+              columns.comments 
+              columns.types int:string
+#### A masked pattern was here ####
+              name default.empty_bucket
+              numFiles 0
+              numRows 0
+              rawDataSize 0
+              serialization.ddl struct empty_bucket { i32 key, string value}
+              serialization.format 1
+              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              totalSize 0
+#### A masked pattern was here ####
+            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+          
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              properties:
+                COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
+                bucket_count 2
+                bucket_field_name key
+                bucketing_version 2
+                column.name.delimiter ,
+                columns key,value
+                columns.comments 
+                columns.types int:string
+#### A masked pattern was here ####
+                name default.empty_bucket
+                numFiles 0
+                numRows 0
+                rawDataSize 0
+                serialization.ddl struct empty_bucket { i32 key, string value}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                totalSize 0
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.empty_bucket
+            name: default.empty_bucket
+      Truncated Path -> Alias:
+        /empty_bucket [s]
       Needs Tagging: false
       Reduce Operator Tree:
         Select Operator

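One pattern worth calling out across these golden files: "base file name" and "Truncated Path -> Alias" now point at the table or partition directory (srcbucket, srcbucket2, empty_bucket, ds=1) rather than at individual bucket files (000000_0, 000001_0), and an empty bucketed table now still gets a Path -> Partition entry for its directory. The sketch below is only an illustration of that directory-level view using the Hadoop FileSystem API; the helper name resolveInputDir is ours, not Hive's.

    import java.io.IOException;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class DirLevelInputSketch {
      // Returns the directory that keys a Path -> Partition entry in the
      // plans above: the input path itself when it is a directory (even an
      // empty one, as with empty_bucket), else its parent directory (so a
      // bucket file like .../srcbucket/000000_0 resolves to .../srcbucket).
      static Path resolveInputDir(FileSystem fs, Path p) throws IOException {
        FileStatus st = fs.getFileStatus(p);
        return st.isDirectory() ? p : p.getParent();
      }
    }
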
http://git-wip-us.apache.org/repos/asf/hive/blob/eaf416ea/ql/src/test/results/clientpositive/sample7.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/sample7.q.out b/ql/src/test/results/clientpositive/sample7.q.out
index 0e2fc28..e198b1f 100644
--- a/ql/src/test/results/clientpositive/sample7.q.out
+++ b/ql/src/test/results/clientpositive/sample7.q.out
@@ -97,7 +97,7 @@ STAGE PLANS:
       Path -> Partition:
 #### A masked pattern was here ####
           Partition
-            base file name: 000000_0
+            base file name: srcbucket
             input format: org.apache.hadoop.mapred.TextInputFormat
             output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
             properties:
@@ -146,7 +146,7 @@ STAGE PLANS:
               name: default.srcbucket
             name: default.srcbucket
       Truncated Path -> Alias:
-        /srcbucket/000000_0 [s]
+        /srcbucket [s]
       Needs Tagging: false
       Reduce Operator Tree:
         Group By Operator
@@ -435,33 +435,86 @@ order by key, value
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@dest1_n160
 #### A masked pattern was here ####
+105	val_105
+105	val_106
+105	val_106
 110	val_111
+113	val_113
+113	val_113
 116	val_116
 116	val_117
+117	val_118
+117	val_118
+119	val_119
+119	val_119
+119	val_119
+119	val_120
+119	val_120
+119	val_120
+121	val_122
+121	val_122
+123	val_124
+123	val_124
 126	val_126
 126	val_127
 126	val_127
 134	val_134
 134	val_134
 134	val_135
+137	val_137
+137	val_137
+137	val_138
+153	val_153
+153	val_154
+153	val_154
 156	val_156
 156	val_157
 156	val_157
+157	val_157
+157	val_158
+157	val_158
 158	val_158
+163	val_163
+169	val_169
+169	val_169
+169	val_169
+169	val_169
+177	val_177
+177	val_178
+177	val_178
 178	val_178
 178	val_179
 178	val_179
 184	val_185
+187	val_187
+187	val_187
+187	val_187
+195	val_195
+195	val_195
+197	val_197
+197	val_197
+197	val_198
 206	val_207
 206	val_207
 206	val_207
 208	val_208
 208	val_208
 208	val_208
+221	val_221
+221	val_221
+229	val_229
+229	val_229
+237	val_237
+237	val_237
+243	val_244
+243	val_244
 244	val_244
 244	val_245
 244	val_245
 244	val_245
+249	val_249
+249	val_250
+249	val_250
 252	val_252
 252	val_253
 254	val_255
@@ -469,27 +522,40 @@ POSTHOOK: Input: default@dest1_n160
 256	val_256
 256	val_257
 266	val_266
+271	val_272
 272	val_272
 272	val_272
 272	val_273
 286	val_286
 286	val_287
+289	val_289
+289	val_290
 292	val_292
 292	val_293
 292	val_293
 304	val_305
+307	val_307
+307	val_307
 308	val_308
 308	val_309
 308	val_309
+315	val_315
 316	val_316
 316	val_316
 316	val_316
+317	val_317
+317	val_317
+317	val_318
 326	val_327
+327	val_327
+327	val_327
+327	val_327
 334	val_335
 336	val_336
 336	val_337
 338	val_338
 338	val_339
+339	val_339
 342	val_342
 342	val_342
 342	val_343
@@ -502,21 +568,45 @@ POSTHOOK: Input: default@dest1_n160
 348	val_348
 348	val_348
 348	val_349
+349	val_350
+349	val_350
+349	val_350
+349	val_350
 352	val_353
 352	val_353
+353	val_353
+353	val_353
+353	val_354
+355	val_356
+355	val_356
 360	val_360
 360	val_361
 362	val_362
 364	val_364
 364	val_365
+369	val_369
+369	val_369
+369	val_369
+369	val_370
+371	val_372
+371	val_372
+371	val_372
+371	val_372
+377	val_377
 378	val_378
 378	val_379
+391	val_392
+391	val_392
 392	val_392
 392	val_393
 392	val_393
 396	val_396
 396	val_396
 396	val_396
+399	val_399
+399	val_399
+399	val_400
+399	val_400
 402	val_402
 402	val_403
 402	val_403
@@ -526,15 +616,31 @@ POSTHOOK: Input: default@dest1_n160
 404	val_405
 404	val_405
 404	val_405
+407	val_407
+407	val_408
+407	val_408
+407	val_408
 408	val_409
 408	val_409
 410	val_411
+417	val_417
+417	val_417
+417	val_417
+419	val_419
+423	val_424
 426	val_427
+427	val_427
+427	val_428
+427	val_428
 440	val_441
 440	val_441
+449	val_449
 452	val_452
 458	val_458
 458	val_458
+463	val_463
+463	val_463
+463	val_464
 466	val_466
 466	val_466
 466	val_466
@@ -545,7 +651,11 @@ POSTHOOK: Input: default@dest1_n160
 478	val_478
 478	val_479
 478	val_479
+479	val_479
 482	val_482
 482	val_483
 484	val_484
 484	val_485
+497	val_497
+497	val_498
+497	val_498

http://git-wip-us.apache.org/repos/asf/hive/blob/eaf416ea/ql/src/test/results/clientpositive/sample9.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/sample9.q.out b/ql/src/test/results/clientpositive/sample9.q.out
index 0de49a6..4819dc1 100644
--- a/ql/src/test/results/clientpositive/sample9.q.out
+++ b/ql/src/test/results/clientpositive/sample9.q.out
@@ -55,7 +55,7 @@ STAGE PLANS:
       Path -> Partition:
 #### A masked pattern was here ####
           Partition
-            base file name: 000000_0
+            base file name: srcbucket
             input format: org.apache.hadoop.mapred.TextInputFormat
             output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
             properties:
@@ -104,7 +104,7 @@ STAGE PLANS:
               name: default.srcbucket
             name: default.srcbucket
       Truncated Path -> Alias:
-        /srcbucket/000000_0 [s:a]
+        /srcbucket [s:a]
 
   Stage: Stage-0
     Fetch Operator
@@ -126,75 +126,185 @@ POSTHOOK: Input: default@srcbucket
 #### A masked pattern was here ####
 2	val_2
 2	val_3
+3	val_4
 6	val_7
 6	val_7
 10	val_10
 10	val_11
+17	val_17
+19	val_19
+19	val_20
 20	val_20
 20	val_21
 20	val_21
+21	val_22
+21	val_22
+21	val_22
+21	val_22
+27	val_27
+29	val_30
+29	val_30
 30	val_30
 30	val_31
+31	val_32
 40	val_41
 40	val_41
+41	val_41
+43	val_43
 46	val_47
 48	val_49
 48	val_49
+51	val_51
+51	val_51
+51	val_52
 54	val_54
+57	val_57
 58	val_58
 58	val_58
 58	val_59
 58	val_59
+59	val_60
 60	val_61
+61	val_62
 64	val_64
+65	val_65
+65	val_66
+65	val_66
+67	val_67
+67	val_67
 68	val_69
+69	val_69
+69	val_70
 70	val_70
 70	val_70
 70	val_70
 70	val_71
+77	val_77
+77	val_78
+77	val_78
 80	val_80
 80	val_81
+83	val_83
+83	val_83
 84	val_84
 84	val_84
+85	val_85
+85	val_86
 86	val_86
 86	val_87
+89	val_90
+89	val_90
+89	val_90
 90	val_90
 90	val_90
 90	val_90
+91	val_92
+93	val_94
+93	val_94
+93	val_94
 96	val_96
+97	val_97
+97	val_97
+97	val_98
+97	val_98
 98	val_98
 98	val_98
+99	val_100
+101	val_102
+105	val_105
+105	val_106
+105	val_106
 106	val_107
 110	val_111
+113	val_113
+113	val_113
 116	val_116
 116	val_117
+117	val_118
+117	val_118
+119	val_119
+119	val_119
+119	val_119
+119	val_120
+119	val_120
+119	val_120
+121	val_122
+121	val_122
+123	val_124
+123	val_124
 126	val_126
 126	val_127
 126	val_127
 132	val_133
 132	val_133
+133	val_133
+133	val_134
 134	val_134
 134	val_134
 134	val_135
+135	val_136
+135	val_136
+135	val_136
+137	val_137
+137	val_137
+137	val_138
 140	val_141
 146	val_146
 146	val_146
+149	val_149
+149	val_149
+149	val_150
+153	val_153
+153	val_154
+153	val_154
 156	val_156
 156	val_157
 156	val_157
+157	val_157
+157	val_158
+157	val_158
 158	val_158
 162	val_162
 162	val_163
+163	val_163
 164	val_164
 164	val_164
 164	val_165
 164	val_165
+165	val_165
+165	val_165
+165	val_166
+169	val_169
+169	val_169
+169	val_169
+169	val_169
+177	val_177
+177	val_178
+177	val_178
 178	val_178
 178	val_179
 178	val_179
+181	val_181
 182	val_183
 184	val_185
+185	val_186
+187	val_187
+187	val_187
+187	val_187
 190	val_190
+191	val_191
+191	val_191
+191	val_192
+195	val_195
+195	val_195
+197	val_197
+197	val_197
+197	val_198
+199	val_199
+199	val_199
+199	val_199
+199	val_200
+201	val_201
 202	val_202
 206	val_207
 206	val_207
@@ -204,34 +314,57 @@ POSTHOOK: Input: default@srcbucket
 208	val_208
 212	val_213
 214	val_214
+215	val_216
 216	val_216
 216	val_216
 216	val_217
+221	val_221
+221	val_221
 226	val_226
 226	val_227
 226	val_227
 226	val_227
 226	val_227
+229	val_229
+229	val_229
+231	val_232
+233	val_233
+233	val_233
+237	val_237
+237	val_237
 238	val_238
 238	val_238
 238	val_239
+239	val_239
+239	val_239
+239	val_240
+239	val_240
 240	val_241
+243	val_244
+243	val_244
 244	val_244
 244	val_245
 244	val_245
 244	val_245
 248	val_248
 248	val_249
+249	val_249
+249	val_250
+249	val_250
 252	val_252
 252	val_253
 254	val_255
+255	val_255
+255	val_255
 256	val_256
 256	val_256
 256	val_257
 260	val_260
 260	val_261
 260	val_261
+261	val_262
 266	val_266
+271	val_272
 272	val_272
 272	val_272
 272	val_273
@@ -241,10 +374,20 @@ POSTHOOK: Input: default@srcbucket
 284	val_285
 286	val_286
 286	val_287
+287	val_287
+287	val_288
+287	val_288
+289	val_289
+289	val_290
+291	val_291
+291	val_292
+291	val_292
 292	val_292
 292	val_293
 292	val_293
 304	val_305
+307	val_307
+307	val_307
 308	val_308
 308	val_309
 308	val_309
@@ -252,37 +395,81 @@ POSTHOOK: Input: default@srcbucket
 310	val_311
 310	val_311
 310	val_311
+311	val_311
+311	val_311
+311	val_311
+313	val_314
+315	val_315
 316	val_316
 316	val_316
 316	val_316
+317	val_317
+317	val_317
+317	val_318
 324	val_325
+325	val_325
+325	val_325
 326	val_327
+327	val_327
+327	val_327
+327	val_327
 332	val_332
 334	val_335
 336	val_336
 336	val_337
+337	val_338
 338	val_338
 338	val_339
+339	val_339
+341	val_341
+341	val_342
+341	val_342
+341	val_342
 342	val_342
 342	val_342
 342	val_343
+343	val_344
 344	val_344
 344	val_344
 344	val_345
+347	val_348
+347	val_348
 348	val_348
 348	val_348
 348	val_348
 348	val_348
 348	val_348
 348	val_349
+349	val_350
+349	val_350
+349	val_350
+349	val_350
+351	val_351
+351	val_352
+351	val_352
 352	val_353
 352	val_353
+353	val_353
+353	val_353
+353	val_354
+355	val_356
+355	val_356
 360	val_360
 360	val_361
 362	val_362
 364	val_364
 364	val_365
+365	val_365
 368	val_368
+369	val_369
+369	val_369
+369	val_369
+369	val_370
+371	val_372
+371	val_372
+371	val_372
+371	val_372
+377	val_377
 378	val_378
 378	val_379
 384	val_384
@@ -295,13 +482,24 @@ POSTHOOK: Input: default@srcbucket
 386	val_387
 386	val_387
 388	val_389
+391	val_392
+391	val_392
 392	val_392
 392	val_393
 392	val_393
+393	val_393
+393	val_394
+393	val_394
 394	val_394
 396	val_396
 396	val_396
 396	val_396
+397	val_397
+397	val_397
+399	val_399
+399	val_399
+399	val_400
+399	val_400
 402	val_402
 402	val_403
 402	val_403
@@ -311,13 +509,36 @@ POSTHOOK: Input: default@srcbucket
 404	val_405
 404	val_405
 404	val_405
+407	val_407
+407	val_408
+407	val_408
+407	val_408
 408	val_409
 408	val_409
+409	val_409
+409	val_409
+409	val_409
+409	val_410
+409	val_410
 410	val_411
+411	val_411
+411	val_412
 414	val_414
 414	val_414
 414	val_415
+417	val_417
+417	val_417
+417	val_417
+419	val_419
+421	val_421
+421	val_422
+421	val_422
+423	val_424
+425	val_426
 426	val_427
+427	val_427
+427	val_428
+427	val_428
 428	val_429
 430	val_430
 430	val_430
@@ -325,13 +546,24 @@ POSTHOOK: Input: default@srcbucket
 430	val_431
 432	val_432
 432	val_433
+435	val_435
+435	val_436
+437	val_437
+437	val_438
 440	val_441
 440	val_441
+443	val_443
+443	val_444
+443	val_444
+443	val_444
 444	val_444
 446	val_446
 446	val_447
 446	val_447
+449	val_449
 452	val_452
+453	val_453
+453	val_454
 454	val_454
 454	val_454
 454	val_454
@@ -339,10 +571,19 @@ POSTHOOK: Input: default@srcbucket
 454	val_455
 458	val_458
 458	val_458
+459	val_459
+459	val_459
+459	val_460
+463	val_463
+463	val_463
+463	val_464
 466	val_466
 466	val_466
 466	val_466
+467	val_467
+467	val_468
 472	val_472
+473	val_474
 474	val_475
 474	val_475
 476	val_477
@@ -351,6 +592,7 @@ POSTHOOK: Input: default@srcbucket
 478	val_478
 478	val_479
 478	val_479
+479	val_479
 480	val_480
 480	val_480
 480	val_480
@@ -358,11 +600,23 @@ POSTHOOK: Input: default@srcbucket
 480	val_481
 482	val_482
 482	val_483
+483	val_483
 484	val_484
 484	val_485
+485	val_485
+485	val_486
+485	val_486
 488	val_489
 490	val_490
 490	val_491
+491	val_491
+491	val_492
+491	val_492
+495	val_495
+495	val_496
+497	val_497
+497	val_498
+497	val_498
 498	val_498
 498	val_498
 498	val_498

http://git-wip-us.apache.org/repos/asf/hive/blob/eaf416ea/ql/src/test/results/clientpositive/smb_mapjoin_11.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/smb_mapjoin_11.q.out b/ql/src/test/results/clientpositive/smb_mapjoin_11.q.out
index a83f3e6..f4be7df 100644
--- a/ql/src/test/results/clientpositive/smb_mapjoin_11.q.out
+++ b/ql/src/test/results/clientpositive/smb_mapjoin_11.q.out
@@ -1932,7 +1932,7 @@ STAGE PLANS:
       Path -> Partition:
 #### A masked pattern was here ####
           Partition
-            base file name: 000001_0
+            base file name: ds=1
             input format: org.apache.hadoop.mapred.TextInputFormat
             output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
             partition values:
@@ -1982,7 +1982,7 @@ STAGE PLANS:
               name: default.test_table1_n1
             name: default.test_table1_n1
       Truncated Path -> Alias:
-        /test_table1_n1/ds=1/000001_0 [test_table1_n1]
+        /test_table1_n1/ds=1 [test_table1_n1]
 
   Stage: Stage-0
     Fetch Operator
@@ -2043,7 +2043,7 @@ STAGE PLANS:
       Path -> Partition:
 #### A masked pattern was here ####
           Partition
-            base file name: 000001_0
+            base file name: ds=1
             input format: org.apache.hadoop.mapred.TextInputFormat
             output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
             partition values:
@@ -2092,7 +2092,7 @@ STAGE PLANS:
               name: default.test_table3_n1
             name: default.test_table3_n1
       Truncated Path -> Alias:
-        /test_table3_n1/ds=1/000001_0 [test_table3_n1]
+        /test_table3_n1/ds=1 [test_table3_n1]
 
   Stage: Stage-0
     Fetch Operator

http://git-wip-us.apache.org/repos/asf/hive/blob/eaf416ea/ql/src/test/results/clientpositive/spark/infer_bucket_sort_bucketed_table.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/infer_bucket_sort_bucketed_table.q.out b/ql/src/test/results/clientpositive/spark/infer_bucket_sort_bucketed_table.q.out
index 8fab7ec..7a2407c 100644
--- a/ql/src/test/results/clientpositive/spark/infer_bucket_sort_bucketed_table.q.out
+++ b/ql/src/test/results/clientpositive/spark/infer_bucket_sort_bucketed_table.q.out
@@ -67,7 +67,7 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@test_table_bucketed
 POSTHOOK: Input: default@test_table_bucketed@part=1
 POSTHOOK: Output: hdfs://### HDFS PATH ###
-0
+177
 PREHOOK: query: SELECT COUNT(*) FROM test_table_bucketed TABLESAMPLE (BUCKET 2 OUT OF 3) WHERE part = '1'
 PREHOOK: type: QUERY
 PREHOOK: Input: default@test_table_bucketed

http://git-wip-us.apache.org/repos/asf/hive/blob/eaf416ea/ql/src/test/results/clientpositive/spark/sample10.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/sample10.q.out b/ql/src/test/results/clientpositive/spark/sample10.q.out
index 555e5f4..3b14273 100644
--- a/ql/src/test/results/clientpositive/spark/sample10.q.out
+++ b/ql/src/test/results/clientpositive/spark/sample10.q.out
@@ -88,7 +88,7 @@ STAGE PLANS:
             Path -> Partition:
 #### A masked pattern was here ####
                 Partition
-                  base file name: 000001_0
+                  base file name: hr=11
                   input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
                   output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
                   partition values:
@@ -139,7 +139,7 @@ STAGE PLANS:
                   name: default.srcpartbucket
 #### A masked pattern was here ####
                 Partition
-                  base file name: 000001_0
+                  base file name: hr=12
                   input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
                   output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
                   partition values:
@@ -190,7 +190,7 @@ STAGE PLANS:
                   name: default.srcpartbucket
 #### A masked pattern was here ####
                 Partition
-                  base file name: 000001_0
+                  base file name: hr=11
                   input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
                   output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
                   partition values:
@@ -241,7 +241,7 @@ STAGE PLANS:
                   name: default.srcpartbucket
 #### A masked pattern was here ####
                 Partition
-                  base file name: 000001_0
+                  base file name: hr=12
                   input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
                   output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
                   partition values:
@@ -291,10 +291,10 @@ STAGE PLANS:
                     name: default.srcpartbucket
                   name: default.srcpartbucket
             Truncated Path -> Alias:
-              /srcpartbucket/ds=2008-04-08/hr=11/000001_0 [srcpartbucket]
-              /srcpartbucket/ds=2008-04-08/hr=12/000001_0 [srcpartbucket]
-              /srcpartbucket/ds=2008-04-09/hr=11/000001_0 [srcpartbucket]
-              /srcpartbucket/ds=2008-04-09/hr=12/000001_0 [srcpartbucket]
+              /srcpartbucket/ds=2008-04-08/hr=11 [srcpartbucket]
+              /srcpartbucket/ds=2008-04-08/hr=12 [srcpartbucket]
+              /srcpartbucket/ds=2008-04-09/hr=11 [srcpartbucket]
+              /srcpartbucket/ds=2008-04-09/hr=12 [srcpartbucket]
         Reducer 2 
             Execution mode: vectorized
             Needs Tagging: false

http://git-wip-us.apache.org/repos/asf/hive/blob/eaf416ea/ql/src/test/results/clientpositive/spark/sample2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/sample2.q.out b/ql/src/test/results/clientpositive/spark/sample2.q.out
index 8b73fdf..185253a 100644
--- a/ql/src/test/results/clientpositive/spark/sample2.q.out
+++ b/ql/src/test/results/clientpositive/spark/sample2.q.out
@@ -78,7 +78,7 @@ STAGE PLANS:
             Path -> Partition:
 #### A masked pattern was here ####
                 Partition
-                  base file name: 000000_0
+                  base file name: srcbucket
                   input format: org.apache.hadoop.mapred.TextInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                   properties:
@@ -127,7 +127,7 @@ STAGE PLANS:
                     name: default.srcbucket
                   name: default.srcbucket
             Truncated Path -> Alias:
-              /srcbucket/000000_0 [s]
+              /srcbucket [s]
 
   Stage: Stage-0
     Move Operator

http://git-wip-us.apache.org/repos/asf/hive/blob/eaf416ea/ql/src/test/results/clientpositive/spark/sample4.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/sample4.q.out b/ql/src/test/results/clientpositive/spark/sample4.q.out
index 3269b01..86fb0f0 100644
--- a/ql/src/test/results/clientpositive/spark/sample4.q.out
+++ b/ql/src/test/results/clientpositive/spark/sample4.q.out
@@ -78,7 +78,7 @@ STAGE PLANS:
             Path -> Partition:
 #### A masked pattern was here ####
                 Partition
-                  base file name: 000000_0
+                  base file name: srcbucket
                   input format: org.apache.hadoop.mapred.TextInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                   properties:
@@ -127,7 +127,7 @@ STAGE PLANS:
                     name: default.srcbucket
                   name: default.srcbucket
             Truncated Path -> Alias:
-              /srcbucket/000000_0 [s]
+              /srcbucket [s]
 
   Stage: Stage-0
     Move Operator


[04/13] hive git commit: HIVE-19946: VectorizedRowBatchCtx.recordIdColumnVector cannot be shared between different JVMs (Teddy Choi via Sergey Shelukhin, Matt McCline)

Posted by se...@apache.org.
HIVE-19946: VectorizedRowBatchCtx.recordIdColumnVector cannot be shared between different JVMs (Teddy Choi via Sergey Shelukhin, Matt McCline)

Signed-off-by: Zoltan Haindrich <ki...@rxd.hu>


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/d1fb780e
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/d1fb780e
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/d1fb780e

Branch: refs/heads/master-txnstats
Commit: d1fb780e5611807c20818bfe127e9b1283b6ea45
Parents: eaf416e
Author: Teddy Choi <pu...@gmail.com>
Authored: Tue Jun 26 09:26:28 2018 +0200
Committer: Zoltan Haindrich <ki...@rxd.hu>
Committed: Tue Jun 26 09:26:28 2018 +0200

----------------------------------------------------------------------
 .../hive/ql/exec/vector/VectorMapOperator.java       |  5 ++---
 .../hive/ql/exec/vector/VectorizedRowBatchCtx.java   | 15 ---------------
 .../ql/io/orc/VectorizedOrcAcidRowBatchReader.java   |  4 ++--
 3 files changed, 4 insertions(+), 20 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/d1fb780e/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorMapOperator.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorMapOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorMapOperator.java
index 2542e03..bd70991 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorMapOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorMapOperator.java
@@ -803,10 +803,9 @@ public class VectorMapOperator extends AbstractMapOperator {
       VectorizedRowBatch batch = (VectorizedRowBatch) value;
       numRows += batch.size;
       if (hasRowIdentifier) {
-        if (batchContext.getRecordIdColumnVector() == null) {
+        final int idx = batchContext.findVirtualColumnNum(VirtualColumn.ROWID);
+        if (idx < 0) {
           setRowIdentiferToNull(batch);
-        } else {
-          batch.cols[rowIdentifierColumnNum] = batchContext.getRecordIdColumnVector();
         }
       }
     }

http://git-wip-us.apache.org/repos/asf/hive/blob/d1fb780e/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorizedRowBatchCtx.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorizedRowBatchCtx.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorizedRowBatchCtx.java
index ffbfb6f..c0ae7c3 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorizedRowBatchCtx.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorizedRowBatchCtx.java
@@ -84,13 +84,6 @@ public class VectorizedRowBatchCtx {
   private int partitionColumnCount;
   private int virtualColumnCount;
   private VirtualColumn[] neededVirtualColumns;
-  /**
-   * A record ID column is a virtual column, so it should be separated from normal data column
-   * processes. A recordIdColumnVector contains RecordIdentifier information in a
-   * StructColumnVector. It has three LongColumnVectors as its fields; original write IDs,
-   * bucket IDs, and row IDs.
-   */
-  private StructColumnVector recordIdColumnVector;
 
   private String[] scratchColumnTypeNames;
   private DataTypePhysicalVariation[] scratchDataTypePhysicalVariations;
@@ -217,14 +210,6 @@ public class VectorizedRowBatchCtx {
     return scratchDataTypePhysicalVariations;
   }
 
-  public StructColumnVector getRecordIdColumnVector() {
-    return this.recordIdColumnVector;
-  }
-
-  public void setRecordIdColumnVector(StructColumnVector recordIdColumnVector) {
-    this.recordIdColumnVector = recordIdColumnVector;
-  }
-
   /**
    * Initializes the VectorizedRowBatch context based on an scratch column type names and
    * object inspector.

http://git-wip-us.apache.org/repos/asf/hive/blob/d1fb780e/ql/src/java/org/apache/hadoop/hive/ql/io/orc/VectorizedOrcAcidRowBatchReader.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/VectorizedOrcAcidRowBatchReader.java b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/VectorizedOrcAcidRowBatchReader.java
index e181d8c..1841cfa 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/VectorizedOrcAcidRowBatchReader.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/VectorizedOrcAcidRowBatchReader.java
@@ -483,8 +483,8 @@ public class VectorizedOrcAcidRowBatchReader
       }
     }
     if (rowIdProjected) {
-      // TODO: could we just do: int ix = rbCtx.findVirtualColumnNum(VirtualColumn.ROWID); value.cols[ix] = recordIdColumnVector;
-      rbCtx.setRecordIdColumnVector(recordIdColumnVector);
+      int ix = rbCtx.findVirtualColumnNum(VirtualColumn.ROWID);
+      value.cols[ix] = recordIdColumnVector;
     }
     progress = baseReader.getProgress();
     return true;
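
For context, a minimal self-contained model of the pattern this commit adopts (the types below are simplified stand-ins, not the real Hive classes; only findVirtualColumnNum mirrors the actual VectorizedRowBatchCtx method): the batch context only exposes where the virtual ROWID column sits, and the reader installs its own record-identifier vector into each batch it emits, so no mutable column vector is shared through the serialized context.

    import java.util.Arrays;

    public class RowIdPatternSketch {

      // Stand-in for a column vector; the real code uses StructColumnVector.
      static final class ColumnVector {
        final String label;
        ColumnVector(String label) { this.label = label; }
      }

      // Stand-in for VectorizedRowBatchCtx: knows column positions, holds no vectors.
      static final class BatchCtx {
        private final String[] virtualColumns;
        BatchCtx(String... virtualColumns) { this.virtualColumns = virtualColumns; }
        // Mirrors VectorizedRowBatchCtx.findVirtualColumnNum: index, or -1 if not projected.
        int findVirtualColumnNum(String vc) {
          return Arrays.asList(virtualColumns).indexOf(vc);
        }
      }

      // Stand-in for VectorizedRowBatch.
      static final class Batch {
        final ColumnVector[] cols = new ColumnVector[3];
      }

      public static void main(String[] args) {
        BatchCtx ctx = new BatchCtx("key", "value", "ROWID");
        Batch batch = new Batch();
        // Reader-local state, never stored on the (possibly shared) context.
        ColumnVector recordIds = new ColumnVector("recordIds");
        int ix = ctx.findVirtualColumnNum("ROWID");
        if (ix >= 0) {
          // Installed per batch, as in the VectorizedOrcAcidRowBatchReader change above.
          batch.cols[ix] = recordIds;
          System.out.println("ROWID vector installed at column " + ix);
        } else {
          // Mirrors the VectorMapOperator branch: null out the row identifier instead.
          System.out.println("ROWID not projected; identifier set to null");
        }
      }
    }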


[06/13] hive git commit: HIVE-19649: Clean up inputs in JDBC PreparedStatement. Add unit tests. (Mykhailo Kysliuk via Zoltan Haindrich)

Posted by se...@apache.org.
HIVE-19649: Clean up inputs in JDBC PreparedStatement. Add unit tests. (Mykhailo Kysliuk via Zoltan Haindrich)

Signed-off-by: Zoltan Haindrich <ki...@rxd.hu>


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/ee14e36c
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/ee14e36c
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/ee14e36c

Branch: refs/heads/master-txnstats
Commit: ee14e36c0a1f99178bf1e98042cf72cc2972640d
Parents: 4abc64c
Author: Mykhailo Kysliuk <mi...@gmail.com>
Authored: Tue Jun 26 11:41:58 2018 +0200
Committer: Zoltan Haindrich <ki...@rxd.hu>
Committed: Tue Jun 26 11:41:58 2018 +0200

----------------------------------------------------------------------
 .../hive/jdbc/TestHivePreparedStatement.java    | 277 +++++++++++--------
 1 file changed, 167 insertions(+), 110 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/ee14e36c/jdbc/src/test/org/apache/hive/jdbc/TestHivePreparedStatement.java
----------------------------------------------------------------------
diff --git a/jdbc/src/test/org/apache/hive/jdbc/TestHivePreparedStatement.java b/jdbc/src/test/org/apache/hive/jdbc/TestHivePreparedStatement.java
index 2a68c91..b641395 100644
--- a/jdbc/src/test/org/apache/hive/jdbc/TestHivePreparedStatement.java
+++ b/jdbc/src/test/org/apache/hive/jdbc/TestHivePreparedStatement.java
@@ -21,6 +21,7 @@ import static org.junit.Assert.assertEquals;
 import static org.mockito.Matchers.any;
 import static org.mockito.Mockito.verify;
 import static org.mockito.Mockito.when;
+import static org.mockito.Mockito.times;
 
 import java.sql.SQLException;
 
@@ -34,122 +35,178 @@ import org.apache.hive.service.rpc.thrift.TOperationState;
 import org.apache.hive.service.rpc.thrift.TSessionHandle;
 import org.apache.hive.service.rpc.thrift.TStatus;
 import org.apache.hive.service.rpc.thrift.TStatusCode;
+import org.apache.hive.service.rpc.thrift.TCloseOperationResp;
+import org.apache.hive.service.rpc.thrift.TCloseOperationReq;
 import org.junit.Before;
 import org.junit.Test;
 import org.mockito.ArgumentCaptor;
 import org.mockito.Mock;
 import org.mockito.MockitoAnnotations;
 
+import java.io.ByteArrayInputStream;
+
 public class TestHivePreparedStatement {
 
-	@Mock
-	private HiveConnection connection;
-	@Mock
-	private Iface client;
-	@Mock
-	private TSessionHandle sessHandle;
-	@Mock
-	TExecuteStatementResp tExecStatementResp;
-	@Mock
-	TGetOperationStatusResp tGetOperationStatusResp;
-	private TStatus tStatus_SUCCESS = new TStatus(TStatusCode.SUCCESS_STATUS);
-	@Mock
-	private TOperationHandle tOperationHandle;
-
-	@Before
-	public void before() throws Exception {
-		MockitoAnnotations.initMocks(this);
-		when(tExecStatementResp.getStatus()).thenReturn(tStatus_SUCCESS);
-		when(tExecStatementResp.getOperationHandle()).thenReturn(tOperationHandle);
-
-		when(tGetOperationStatusResp.getStatus()).thenReturn(tStatus_SUCCESS);
-		when(tGetOperationStatusResp.getOperationState()).thenReturn(TOperationState.FINISHED_STATE);
-		when(tGetOperationStatusResp.isSetOperationState()).thenReturn(true);
-		when(tGetOperationStatusResp.isSetOperationCompleted()).thenReturn(true);
-
-		when(client.GetOperationStatus(any(TGetOperationStatusReq.class))).thenReturn(tGetOperationStatusResp);
-		when(client.ExecuteStatement(any(TExecuteStatementReq.class))).thenReturn(tExecStatementResp);
-	}
-
-	@SuppressWarnings("resource")
-	@Test
-	public void testNonParameterized() throws Exception {
-		String sql = "select 1";
-		HivePreparedStatement ps = new HivePreparedStatement(connection, client, sessHandle, sql);
-		ps.execute();
-
-		ArgumentCaptor<TExecuteStatementReq> argument = ArgumentCaptor.forClass(TExecuteStatementReq.class);
-		verify(client).ExecuteStatement(argument.capture());
-		assertEquals("select 1", argument.getValue().getStatement());
-	}
-
-	@SuppressWarnings("resource")
-	@Test
-	public void unusedArgument() throws Exception {
-		String sql = "select 1";
-		HivePreparedStatement ps = new HivePreparedStatement(connection, client, sessHandle, sql);
-		ps.setString(1, "asd");
-		ps.execute();
-	}
-
-	@SuppressWarnings("resource")
-	@Test(expected=SQLException.class)
-	public void unsetArgument() throws Exception {
-		String sql = "select 1 from x where a=?";
-		HivePreparedStatement ps = new HivePreparedStatement(connection, client, sessHandle, sql);
-		ps.execute();
-	}
-
-	@SuppressWarnings("resource")
-	@Test
-	public void oneArgument() throws Exception {
-		String sql = "select 1 from x where a=?";
-		HivePreparedStatement ps = new HivePreparedStatement(connection, client, sessHandle, sql);
-		ps.setString(1, "asd");
-		ps.execute();
-		
-		ArgumentCaptor<TExecuteStatementReq> argument = ArgumentCaptor.forClass(TExecuteStatementReq.class);
-		verify(client).ExecuteStatement(argument.capture());
-		assertEquals("select 1 from x where a='asd'", argument.getValue().getStatement());
-	}
-	
-	@SuppressWarnings("resource")
-	@Test
-	public void escapingOfStringArgument() throws Exception {
-		String sql = "select 1 from x where a=?";
-		HivePreparedStatement ps = new HivePreparedStatement(connection, client, sessHandle, sql);
-		ps.setString(1, "a'\"d");
-		ps.execute();
-		
-		ArgumentCaptor<TExecuteStatementReq> argument = ArgumentCaptor.forClass(TExecuteStatementReq.class);
-		verify(client).ExecuteStatement(argument.capture());
-		assertEquals("select 1 from x where a='a\\'\"d'", argument.getValue().getStatement());
-	}
-	
-	@SuppressWarnings("resource")
-	@Test
-	public void pastingIntoQuery() throws Exception {
-		String sql = "select 1 from x where a='e' || ?";
-		HivePreparedStatement ps = new HivePreparedStatement(connection, client, sessHandle, sql);
-		ps.setString(1, "v");
-		ps.execute();
-		
-		ArgumentCaptor<TExecuteStatementReq> argument = ArgumentCaptor.forClass(TExecuteStatementReq.class);
-		verify(client).ExecuteStatement(argument.capture());
-		assertEquals("select 1 from x where a='e' || 'v'", argument.getValue().getStatement());
-	}
-	
-	// HIVE-13625
-	@SuppressWarnings("resource")
-	@Test
-	public void pastingIntoEscapedQuery() throws Exception {
-		String sql = "select 1 from x where a='\\044e' || ?";
-		HivePreparedStatement ps = new HivePreparedStatement(connection, client, sessHandle, sql);
-		ps.setString(1, "v");
-		ps.execute();
-		
-		ArgumentCaptor<TExecuteStatementReq> argument = ArgumentCaptor.forClass(TExecuteStatementReq.class);
-		verify(client).ExecuteStatement(argument.capture());
-		assertEquals("select 1 from x where a='\\044e' || 'v'", argument.getValue().getStatement());
-	}
+  @Mock
+  private HiveConnection connection;
+  @Mock
+  private Iface client;
+  @Mock
+  private TSessionHandle sessHandle;
+  @Mock
+  private TExecuteStatementResp tExecStatementResp;
+  @Mock
+  private TGetOperationStatusResp tGetOperationStatusResp;
+  @Mock
+  private TCloseOperationResp tCloseOperationResp;
+  private TStatus tStatusSuccess = new TStatus(TStatusCode.SUCCESS_STATUS);
+  @Mock
+  private TOperationHandle tOperationHandle;
+
+  @Before
+  public void before() throws Exception {
+    MockitoAnnotations.initMocks(this);
+    when(tExecStatementResp.getStatus()).thenReturn(tStatusSuccess);
+    when(tExecStatementResp.getOperationHandle()).thenReturn(tOperationHandle);
+
+    when(tGetOperationStatusResp.getStatus()).thenReturn(tStatusSuccess);
+    when(tGetOperationStatusResp.getOperationState()).thenReturn(TOperationState.FINISHED_STATE);
+    when(tGetOperationStatusResp.isSetOperationState()).thenReturn(true);
+    when(tGetOperationStatusResp.isSetOperationCompleted()).thenReturn(true);
+
+    when(tCloseOperationResp.getStatus()).thenReturn(tStatusSuccess);
+
+    when(client.GetOperationStatus(any(TGetOperationStatusReq.class)))
+        .thenReturn(tGetOperationStatusResp);
+    when(client.CloseOperation(any(TCloseOperationReq.class))).thenReturn(tCloseOperationResp);
+    when(client.ExecuteStatement(any(TExecuteStatementReq.class))).thenReturn(tExecStatementResp);
+  }
+
+  @SuppressWarnings("resource")
+  @Test
+  public void testNonParameterized() throws Exception {
+    String sql = "select 1";
+    HivePreparedStatement ps = new HivePreparedStatement(connection, client, sessHandle, sql);
+    ps.execute();
+
+    ArgumentCaptor<TExecuteStatementReq> argument =
+        ArgumentCaptor.forClass(TExecuteStatementReq.class);
+    verify(client).ExecuteStatement(argument.capture());
+    assertEquals("select 1", argument.getValue().getStatement());
+  }
+
+  @SuppressWarnings("resource")
+  @Test
+  public void unusedArgument() throws Exception {
+    String sql = "select 1";
+    HivePreparedStatement ps = new HivePreparedStatement(connection, client, sessHandle, sql);
+    ps.setString(1, "asd");
+    ps.execute();
+  }
+
+  @SuppressWarnings("resource")
+  @Test(expected = SQLException.class)
+  public void unsetArgument()
+      throws Exception {
+    String sql = "select 1 from x where a=?";
+    HivePreparedStatement ps = new HivePreparedStatement(connection, client, sessHandle, sql);
+    ps.execute();
+  }
+
+  @SuppressWarnings("resource")
+  @Test
+  public void oneArgument() throws Exception {
+    String sql = "select 1 from x where a=?";
+    HivePreparedStatement ps = new HivePreparedStatement(connection, client, sessHandle, sql);
+    ps.setString(1, "asd");
+    ps.execute();
+
+    ArgumentCaptor<TExecuteStatementReq> argument =
+        ArgumentCaptor.forClass(TExecuteStatementReq.class);
+    verify(client).ExecuteStatement(argument.capture());
+    assertEquals("select 1 from x where a='asd'", argument.getValue().getStatement());
+  }
+
+  @SuppressWarnings("resource")
+  @Test
+  public void escapingOfStringArgument() throws Exception {
+    String sql = "select 1 from x where a=?";
+    HivePreparedStatement ps = new HivePreparedStatement(connection, client, sessHandle, sql);
+    ps.setString(1, "a'\"d");
+    ps.execute();
+
+    ArgumentCaptor<TExecuteStatementReq> argument =
+        ArgumentCaptor.forClass(TExecuteStatementReq.class);
+    verify(client).ExecuteStatement(argument.capture());
+    assertEquals("select 1 from x where a='a\\'\"d'", argument.getValue().getStatement());
+  }
+
+  @SuppressWarnings("resource")
+  @Test
+  public void pastingIntoQuery() throws Exception {
+    String sql = "select 1 from x where a='e' || ?";
+    HivePreparedStatement ps = new HivePreparedStatement(connection, client, sessHandle, sql);
+    ps.setString(1, "v");
+    ps.execute();
+
+    ArgumentCaptor<TExecuteStatementReq> argument =
+        ArgumentCaptor.forClass(TExecuteStatementReq.class);
+    verify(client).ExecuteStatement(argument.capture());
+    assertEquals("select 1 from x where a='e' || 'v'", argument.getValue().getStatement());
+  }
+
+  // HIVE-13625
+  @SuppressWarnings("resource")
+  @Test
+  public void pastingIntoEscapedQuery() throws Exception {
+    String sql = "select 1 from x where a='\\044e' || ?";
+    HivePreparedStatement ps = new HivePreparedStatement(connection, client, sessHandle, sql);
+    ps.setString(1, "v");
+    ps.execute();
+
+    ArgumentCaptor<TExecuteStatementReq> argument =
+        ArgumentCaptor.forClass(TExecuteStatementReq.class);
+    verify(client).ExecuteStatement(argument.capture());
+    assertEquals("select 1 from x where a='\\044e' || 'v'", argument.getValue().getStatement());
+  }
+
+  @Test
+  public void testSingleQuoteSetString() throws Exception {
+    String sql = "select * from table where value=?";
+    ArgumentCaptor<TExecuteStatementReq> argument =
+        ArgumentCaptor.forClass(TExecuteStatementReq.class);
+    HivePreparedStatement ps = new HivePreparedStatement(connection, client, sessHandle, sql);
+
+    ps.setString(1, "anyValue\\' or 1=1 --");
+    ps.execute();
+    verify(client).ExecuteStatement(argument.capture());
+    assertEquals("select * from table where value='anyValue\\' or 1=1 --'",
+        argument.getValue().getStatement());
+
+    ps.setString(1, "anyValue\\\\' or 1=1 --");
+    ps.execute();
+    verify(client, times(2)).ExecuteStatement(argument.capture());
+    assertEquals("select * from table where value='anyValue\\\\\\' or 1=1 --'",
+        argument.getValue().getStatement());
+  }
+
+  @Test
+  public void testSingleQuoteSetBinaryStream() throws Exception {
+    String sql = "select * from table where value=?";
+    ArgumentCaptor<TExecuteStatementReq> argument =
+        ArgumentCaptor.forClass(TExecuteStatementReq.class);
+    HivePreparedStatement ps = new HivePreparedStatement(connection, client, sessHandle, sql);
+
+    ps.setBinaryStream(1, new ByteArrayInputStream("'anyValue' or 1=1".getBytes()));
+    ps.execute();
+    verify(client).ExecuteStatement(argument.capture());
+    assertEquals("select * from table where value='\\'anyValue\\' or 1=1'",
+        argument.getValue().getStatement());
+
+    ps.setBinaryStream(1, new ByteArrayInputStream("\\'anyValue\\' or 1=1".getBytes()));
+    ps.execute();
+    verify(client, times(2)).ExecuteStatement(argument.capture());
+    assertEquals("select * from table where value='\\'anyValue\\' or 1=1'",
+        argument.getValue().getStatement());
+  }
 }
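
A self-contained sketch of the single-quote handling these new tests pin down (an illustrative re-implementation, not the actual HivePreparedStatement code, and deliberately simplified: it does not track runs of backslashes, which the second assertion in testSingleQuoteSetString exercises): a bare single quote in a bound parameter is backslash-escaped before substitution into the SQL text, while a quote already preceded by a backslash is left as-is.

    public class QuoteEscapeSketch {

      // Escape bare single quotes; leave a quote alone if the preceding
      // character is a backslash (simplified relative to the real escaping).
      static String escapeParam(String raw) {
        StringBuilder sb = new StringBuilder(raw.length());
        for (int i = 0; i < raw.length(); i++) {
          char c = raw.charAt(i);
          if (c == '\'' && (i == 0 || raw.charAt(i - 1) != '\\')) {
            sb.append('\\');
          }
          sb.append(c);
        }
        return sb.toString();
      }

      public static void main(String[] args) {
        String sql = "select * from table where value=?";
        String bound = sql.replace("?", "'" + escapeParam("'anyValue' or 1=1") + "'");
        // prints: select * from table where value='\'anyValue\' or 1=1'
        // matching the expectation in testSingleQuoteSetBinaryStream above
        System.out.println(bound);
      }
    }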


[12/13] hive git commit: HIVE-19532 : fix tests - update some out files on master-txnstats branch (Sergey Shelukhin)

Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/798ff7d2/ql/src/test/results/clientpositive/llap/insert_values_orig_table_use_metadata.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/insert_values_orig_table_use_metadata.q.out b/ql/src/test/results/clientpositive/llap/insert_values_orig_table_use_metadata.q.out
index 8bd95b1..492fe05 100644
--- a/ql/src/test/results/clientpositive/llap/insert_values_orig_table_use_metadata.q.out
+++ b/ql/src/test/results/clientpositive/llap/insert_values_orig_table_use_metadata.q.out
@@ -168,8 +168,11 @@ Retention:          	0
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
 Table Parameters:	 	 
+	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"cbigint\":\"true\",\"cboolean1\":\"true\",\"cboolean2\":\"true\",\"cdouble\":\"true\",\"cfloat\":\"true\",\"cint\":\"true\",\"csmallint\":\"true\",\"cstring1\":\"true\",\"cstring2\":\"true\",\"ctimestamp1\":\"true\",\"ctimestamp2\":\"true\",\"ctinyint\":\"true\"}}
 	bucketing_version   	2                   
 	numFiles            	1                   
+	numRows             	12288               
+	rawDataSize         	0                   
 	totalSize           	295583              
 	transactional       	true                
 	transactional_properties	default             
@@ -190,54 +193,12 @@ PREHOOK: type: QUERY
 POSTHOOK: query: explain select count(*) from acid_ivot
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
+  Stage-0 is a root stage
 
 STAGE PLANS:
-  Stage: Stage-1
-    Tez
-#### A masked pattern was here ####
-      Edges:
-        Reducer 2 <- Map 1 (CUSTOM_SIMPLE_EDGE)
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: acid_ivot
-                  Statistics: Num rows: 5864 Data size: 2955830 Basic stats: COMPLETE Column stats: COMPLETE
-                  Select Operator
-                    Statistics: Num rows: 5864 Data size: 2955830 Basic stats: COMPLETE Column stats: COMPLETE
-                    Group By Operator
-                      aggregations: count()
-                      mode: hash
-                      outputColumnNames: _col0
-                      Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
-                      Reduce Output Operator
-                        sort order: 
-                        Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
-                        value expressions: _col0 (type: bigint)
-            Execution mode: vectorized, llap
-            LLAP IO: may be used (ACID table)
-        Reducer 2 
-            Execution mode: vectorized, llap
-            Reduce Operator Tree:
-              Group By Operator
-                aggregations: count(VALUE._col0)
-                mode: mergepartial
-                outputColumnNames: _col0
-                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
-                File Output Operator
-                  compressed: false
-                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
-                  table:
-                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
   Stage: Stage-0
     Fetch Operator
-      limit: -1
+      limit: 1
       Processor Tree:
         ListSink
 
@@ -374,8 +335,11 @@ Retention:          	0
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
 Table Parameters:	 	 
+	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"cbigint\":\"true\",\"cboolean1\":\"true\",\"cboolean2\":\"true\",\"cdouble\":\"true\",\"cfloat\":\"true\",\"cint\":\"true\",\"csmallint\":\"true\",\"cstring1\":\"true\",\"cstring2\":\"true\",\"ctimestamp1\":\"true\",\"ctimestamp2\":\"true\",\"ctinyint\":\"true\"}}
 	bucketing_version   	2                   
 	numFiles            	1                   
+	numRows             	2                   
+	rawDataSize         	0                   
 	totalSize           	1663                
 	transactional       	true                
 	transactional_properties	default             
@@ -396,54 +360,12 @@ PREHOOK: type: QUERY
 POSTHOOK: query: explain select count(*) from acid_ivot
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
+  Stage-0 is a root stage
 
 STAGE PLANS:
-  Stage: Stage-1
-    Tez
-#### A masked pattern was here ####
-      Edges:
-        Reducer 2 <- Map 1 (CUSTOM_SIMPLE_EDGE)
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: acid_ivot
-                  Statistics: Num rows: 32 Data size: 16630 Basic stats: COMPLETE Column stats: COMPLETE
-                  Select Operator
-                    Statistics: Num rows: 32 Data size: 16630 Basic stats: COMPLETE Column stats: COMPLETE
-                    Group By Operator
-                      aggregations: count()
-                      mode: hash
-                      outputColumnNames: _col0
-                      Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
-                      Reduce Output Operator
-                        sort order: 
-                        Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
-                        value expressions: _col0 (type: bigint)
-            Execution mode: vectorized, llap
-            LLAP IO: may be used (ACID table)
-        Reducer 2 
-            Execution mode: vectorized, llap
-            Reduce Operator Tree:
-              Group By Operator
-                aggregations: count(VALUE._col0)
-                mode: mergepartial
-                outputColumnNames: _col0
-                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
-                File Output Operator
-                  compressed: false
-                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
-                  table:
-                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
   Stage: Stage-0
     Fetch Operator
-      limit: -1
+      limit: 1
       Processor Tree:
         ListSink
 
@@ -507,8 +429,11 @@ Retention:          	0
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
 Table Parameters:	 	 
+	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"cbigint\":\"true\",\"cboolean1\":\"true\",\"cboolean2\":\"true\",\"cdouble\":\"true\",\"cfloat\":\"true\",\"cint\":\"true\",\"csmallint\":\"true\",\"cstring1\":\"true\",\"cstring2\":\"true\",\"ctimestamp1\":\"true\",\"ctimestamp2\":\"true\",\"ctinyint\":\"true\"}}
 	bucketing_version   	2                   
 	numFiles            	2                   
+	numRows             	4                   
+	rawDataSize         	0                   
 	totalSize           	3326                
 	transactional       	true                
 	transactional_properties	default             
@@ -529,54 +454,12 @@ PREHOOK: type: QUERY
 POSTHOOK: query: explain select count(*) from acid_ivot
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
+  Stage-0 is a root stage
 
 STAGE PLANS:
-  Stage: Stage-1
-    Tez
-#### A masked pattern was here ####
-      Edges:
-        Reducer 2 <- Map 1 (CUSTOM_SIMPLE_EDGE)
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: acid_ivot
-                  Statistics: Num rows: 65 Data size: 33260 Basic stats: COMPLETE Column stats: COMPLETE
-                  Select Operator
-                    Statistics: Num rows: 65 Data size: 33260 Basic stats: COMPLETE Column stats: COMPLETE
-                    Group By Operator
-                      aggregations: count()
-                      mode: hash
-                      outputColumnNames: _col0
-                      Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
-                      Reduce Output Operator
-                        sort order: 
-                        Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
-                        value expressions: _col0 (type: bigint)
-            Execution mode: vectorized, llap
-            LLAP IO: may be used (ACID table)
-        Reducer 2 
-            Execution mode: vectorized, llap
-            Reduce Operator Tree:
-              Group By Operator
-                aggregations: count(VALUE._col0)
-                mode: mergepartial
-                outputColumnNames: _col0
-                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
-                File Output Operator
-                  compressed: false
-                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
-                  table:
-                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
   Stage: Stage-0
     Fetch Operator
-      limit: -1
+      limit: 1
       Processor Tree:
         ListSink
 
@@ -636,8 +519,11 @@ Retention:          	0
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
 Table Parameters:	 	 
+	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"cbigint\":\"true\",\"cboolean1\":\"true\",\"cboolean2\":\"true\",\"cdouble\":\"true\",\"cfloat\":\"true\",\"cint\":\"true\",\"csmallint\":\"true\",\"cstring1\":\"true\",\"cstring2\":\"true\",\"ctimestamp1\":\"true\",\"ctimestamp2\":\"true\",\"ctinyint\":\"true\"}}
 	bucketing_version   	2                   
 	numFiles            	3                   
+	numRows             	12292               
+	rawDataSize         	0                   
 	totalSize           	298909              
 	transactional       	true                
 	transactional_properties	default             
@@ -658,54 +544,12 @@ PREHOOK: type: QUERY
 POSTHOOK: query: explain select count(*) from acid_ivot
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
+  Stage-0 is a root stage
 
 STAGE PLANS:
-  Stage: Stage-1
-    Tez
-#### A masked pattern was here ####
-      Edges:
-        Reducer 2 <- Map 1 (CUSTOM_SIMPLE_EDGE)
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: acid_ivot
-                  Statistics: Num rows: 5930 Data size: 2989090 Basic stats: COMPLETE Column stats: COMPLETE
-                  Select Operator
-                    Statistics: Num rows: 5930 Data size: 2989090 Basic stats: COMPLETE Column stats: COMPLETE
-                    Group By Operator
-                      aggregations: count()
-                      mode: hash
-                      outputColumnNames: _col0
-                      Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
-                      Reduce Output Operator
-                        sort order: 
-                        Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
-                        value expressions: _col0 (type: bigint)
-            Execution mode: vectorized, llap
-            LLAP IO: may be used (ACID table)
-        Reducer 2 
-            Execution mode: vectorized, llap
-            Reduce Operator Tree:
-              Group By Operator
-                aggregations: count(VALUE._col0)
-                mode: mergepartial
-                outputColumnNames: _col0
-                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
-                File Output Operator
-                  compressed: false
-                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
-                  table:
-                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
   Stage: Stage-0
     Fetch Operator
-      limit: -1
+      limit: 1
       Processor Tree:
         ListSink
 

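The plan collapse in this out file follows from stats-based query answering: once basic stats are marked accurate (COLUMN_STATS_ACCURATE plus numRows in the table parameters), count(*) can be served from the metastore, leaving only a Fetch stage with limit 1 instead of a Tez job. A sketch of observing this over JDBC, assuming the Hive JDBC driver is on the classpath, a placeholder HiveServer2 URL, and that hive.compute.query.using.stats is enabled for the session (an assumption about this test configuration):

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.ResultSet;
    import java.sql.Statement;

    public class StatsCountSketch {
      public static void main(String[] args) throws Exception {
        // Placeholder endpoint; any HiveServer2 instance works.
        try (Connection conn =
                 DriverManager.getConnection("jdbc:hive2://localhost:10000/default");
             Statement stmt = conn.createStatement()) {
          stmt.execute("set hive.compute.query.using.stats=true");
          try (ResultSet rs = stmt.executeQuery("select count(*) from acid_ivot")) {
            while (rs.next()) {
              // With accurate basic stats, this is answered from numRows
              // in the table parameters; no execution stage is launched.
              System.out.println(rs.getLong(1));
            }
          }
        }
      }
    }
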
http://git-wip-us.apache.org/repos/asf/hive/blob/798ff7d2/ql/src/test/results/clientpositive/llap/materialized_view_create_rewrite_3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/materialized_view_create_rewrite_3.q.out b/ql/src/test/results/clientpositive/llap/materialized_view_create_rewrite_3.q.out
index 6027804..0628ad8 100644
--- a/ql/src/test/results/clientpositive/llap/materialized_view_create_rewrite_3.q.out
+++ b/ql/src/test/results/clientpositive/llap/materialized_view_create_rewrite_3.q.out
@@ -759,19 +759,19 @@ STAGE PLANS:
                 TableScan
                   alias: cmv_basetable_2
                   filterExpr: ((c > 10.1) and a is not null) (type: boolean)
-                  Statistics: Num rows: 46 Data size: 5336 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 1 Data size: 116 Basic stats: COMPLETE Column stats: COMPLETE
                   Filter Operator
                     predicate: ((c > 10.1) and a is not null) (type: boolean)
-                    Statistics: Num rows: 15 Data size: 1740 Basic stats: COMPLETE Column stats: COMPLETE
+                    Statistics: Num rows: 1 Data size: 116 Basic stats: COMPLETE Column stats: COMPLETE
                     Select Operator
                       expressions: a (type: int), c (type: decimal(10,2))
                       outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 15 Data size: 1740 Basic stats: COMPLETE Column stats: COMPLETE
+                      Statistics: Num rows: 1 Data size: 116 Basic stats: COMPLETE Column stats: COMPLETE
                       Reduce Output Operator
                         key expressions: _col0 (type: int)
                         sort order: +
                         Map-reduce partition columns: _col0 (type: int)
-                        Statistics: Num rows: 15 Data size: 1740 Basic stats: COMPLETE Column stats: COMPLETE
+                        Statistics: Num rows: 1 Data size: 116 Basic stats: COMPLETE Column stats: COMPLETE
                         value expressions: _col1 (type: decimal(10,2))
             Execution mode: llap
             LLAP IO: may be used (ACID table)
@@ -785,17 +785,17 @@ STAGE PLANS:
                   0 _col0 (type: int)
                   1 _col0 (type: int)
                 outputColumnNames: _col0, _col2
-                Statistics: Num rows: 25 Data size: 2900 Basic stats: COMPLETE Column stats: COMPLETE
+                Statistics: Num rows: 5 Data size: 580 Basic stats: COMPLETE Column stats: COMPLETE
                 Group By Operator
                   keys: _col0 (type: int), _col2 (type: decimal(10,2))
                   mode: hash
                   outputColumnNames: _col0, _col1
-                  Statistics: Num rows: 1 Data size: 116 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 2 Data size: 232 Basic stats: COMPLETE Column stats: COMPLETE
                   Reduce Output Operator
                     key expressions: _col0 (type: int), _col1 (type: decimal(10,2))
                     sort order: ++
                     Map-reduce partition columns: _col0 (type: int), _col1 (type: decimal(10,2))
-                    Statistics: Num rows: 1 Data size: 116 Basic stats: COMPLETE Column stats: COMPLETE
+                    Statistics: Num rows: 2 Data size: 232 Basic stats: COMPLETE Column stats: COMPLETE
         Reducer 3 
             Execution mode: llap
             Reduce Operator Tree:
@@ -803,14 +803,14 @@ STAGE PLANS:
                 keys: KEY._col0 (type: int), KEY._col1 (type: decimal(10,2))
                 mode: mergepartial
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 1 Data size: 116 Basic stats: COMPLETE Column stats: COMPLETE
+                Statistics: Num rows: 2 Data size: 232 Basic stats: COMPLETE Column stats: COMPLETE
                 Select Operator
                   expressions: _col0 (type: int)
                   outputColumnNames: _col0
-                  Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
                   File Output Operator
                     compressed: false
-                    Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
+                    Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
                     table:
                         input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                         output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@@ -1041,19 +1041,19 @@ STAGE PLANS:
                 TableScan
                   alias: cmv_basetable_2
                   filterExpr: ((c > 10.1) and a is not null) (type: boolean)
-                  Statistics: Num rows: 46 Data size: 5336 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 1 Data size: 116 Basic stats: COMPLETE Column stats: COMPLETE
                   Filter Operator
                     predicate: ((c > 10.1) and a is not null) (type: boolean)
-                    Statistics: Num rows: 15 Data size: 1740 Basic stats: COMPLETE Column stats: COMPLETE
+                    Statistics: Num rows: 1 Data size: 116 Basic stats: COMPLETE Column stats: COMPLETE
                     Select Operator
                       expressions: a (type: int), c (type: decimal(10,2))
                       outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 15 Data size: 1740 Basic stats: COMPLETE Column stats: COMPLETE
+                      Statistics: Num rows: 1 Data size: 116 Basic stats: COMPLETE Column stats: COMPLETE
                       Reduce Output Operator
                         key expressions: _col0 (type: int)
                         sort order: +
                         Map-reduce partition columns: _col0 (type: int)
-                        Statistics: Num rows: 15 Data size: 1740 Basic stats: COMPLETE Column stats: COMPLETE
+                        Statistics: Num rows: 1 Data size: 116 Basic stats: COMPLETE Column stats: COMPLETE
                         value expressions: _col1 (type: decimal(10,2))
             Execution mode: llap
             LLAP IO: may be used (ACID table)
@@ -1067,17 +1067,17 @@ STAGE PLANS:
                   0 _col0 (type: int)
                   1 _col0 (type: int)
                 outputColumnNames: _col0, _col2
-                Statistics: Num rows: 25 Data size: 2900 Basic stats: COMPLETE Column stats: COMPLETE
+                Statistics: Num rows: 5 Data size: 580 Basic stats: COMPLETE Column stats: COMPLETE
                 Group By Operator
                   keys: _col0 (type: int), _col2 (type: decimal(10,2))
                   mode: hash
                   outputColumnNames: _col0, _col1
-                  Statistics: Num rows: 1 Data size: 116 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 2 Data size: 232 Basic stats: COMPLETE Column stats: COMPLETE
                   Reduce Output Operator
                     key expressions: _col0 (type: int), _col1 (type: decimal(10,2))
                     sort order: ++
                     Map-reduce partition columns: _col0 (type: int), _col1 (type: decimal(10,2))
-                    Statistics: Num rows: 1 Data size: 116 Basic stats: COMPLETE Column stats: COMPLETE
+                    Statistics: Num rows: 2 Data size: 232 Basic stats: COMPLETE Column stats: COMPLETE
         Reducer 3 
             Execution mode: llap
             Reduce Operator Tree:
@@ -1085,14 +1085,14 @@ STAGE PLANS:
                 keys: KEY._col0 (type: int), KEY._col1 (type: decimal(10,2))
                 mode: mergepartial
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 1 Data size: 116 Basic stats: COMPLETE Column stats: COMPLETE
+                Statistics: Num rows: 2 Data size: 232 Basic stats: COMPLETE Column stats: COMPLETE
                 Select Operator
                   expressions: _col0 (type: int)
                   outputColumnNames: _col0
-                  Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
                   File Output Operator
                     compressed: false
-                    Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
+                    Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
                     table:
                         input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                         output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat

http://git-wip-us.apache.org/repos/asf/hive/blob/798ff7d2/ql/src/test/results/clientpositive/llap/materialized_view_create_rewrite_4.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/materialized_view_create_rewrite_4.q.out b/ql/src/test/results/clientpositive/llap/materialized_view_create_rewrite_4.q.out
index b3fd29a..a804637 100644
--- a/ql/src/test/results/clientpositive/llap/materialized_view_create_rewrite_4.q.out
+++ b/ql/src/test/results/clientpositive/llap/materialized_view_create_rewrite_4.q.out
@@ -945,8 +945,11 @@ Retention:          	0
 #### A masked pattern was here ####
 Table Type:         	MATERIALIZED_VIEW   	 
 Table Parameters:	 	 
+	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\"}
 	bucketing_version   	2                   
 	numFiles            	3                   
+	numRows             	3                   
+	rawDataSize         	248                 
 	totalSize           	1508                
 	transactional       	true                
 	transactional_properties	default             
@@ -1077,19 +1080,19 @@ STAGE PLANS:
                 TableScan
                   alias: cmv_basetable_2_n2
                   filterExpr: ((c > 10) and a is not null) (type: boolean)
-                  Statistics: Num rows: 61 Data size: 7320 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 3 Data size: 360 Basic stats: COMPLETE Column stats: COMPLETE
                   Filter Operator
                     predicate: ((c > 10) and a is not null) (type: boolean)
-                    Statistics: Num rows: 20 Data size: 2400 Basic stats: COMPLETE Column stats: COMPLETE
+                    Statistics: Num rows: 1 Data size: 120 Basic stats: COMPLETE Column stats: COMPLETE
                     Select Operator
                       expressions: a (type: int), c (type: decimal(10,2)), d (type: int)
                       outputColumnNames: _col0, _col1, _col2
-                      Statistics: Num rows: 20 Data size: 2400 Basic stats: COMPLETE Column stats: COMPLETE
+                      Statistics: Num rows: 1 Data size: 120 Basic stats: COMPLETE Column stats: COMPLETE
                       Reduce Output Operator
                         key expressions: _col0 (type: int)
                         sort order: +
                         Map-reduce partition columns: _col0 (type: int)
-                        Statistics: Num rows: 20 Data size: 2400 Basic stats: COMPLETE Column stats: COMPLETE
+                        Statistics: Num rows: 1 Data size: 120 Basic stats: COMPLETE Column stats: COMPLETE
                         value expressions: _col1 (type: decimal(10,2)), _col2 (type: int)
             Execution mode: llap
             LLAP IO: may be used (ACID table)
@@ -1103,7 +1106,7 @@ STAGE PLANS:
                   0 _col0 (type: int)
                   1 _col0 (type: int)
                 outputColumnNames: _col0, _col2, _col3
-                Statistics: Num rows: 33 Data size: 3960 Basic stats: COMPLETE Column stats: COMPLETE
+                Statistics: Num rows: 2 Data size: 240 Basic stats: COMPLETE Column stats: COMPLETE
                 Group By Operator
                   aggregations: sum(_col3)
                   keys: _col0 (type: int), _col2 (type: decimal(10,2))
@@ -1299,19 +1302,19 @@ STAGE PLANS:
                 TableScan
                   alias: cmv_basetable_2_n2
                   filterExpr: ((c > 10) and a is not null) (type: boolean)
-                  Statistics: Num rows: 75 Data size: 9000 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 2 Data size: 240 Basic stats: COMPLETE Column stats: COMPLETE
                   Filter Operator
                     predicate: ((c > 10) and a is not null) (type: boolean)
-                    Statistics: Num rows: 25 Data size: 3000 Basic stats: COMPLETE Column stats: COMPLETE
+                    Statistics: Num rows: 1 Data size: 120 Basic stats: COMPLETE Column stats: COMPLETE
                     Select Operator
                       expressions: a (type: int), c (type: decimal(10,2)), d (type: int)
                       outputColumnNames: _col0, _col1, _col2
-                      Statistics: Num rows: 25 Data size: 3000 Basic stats: COMPLETE Column stats: COMPLETE
+                      Statistics: Num rows: 1 Data size: 120 Basic stats: COMPLETE Column stats: COMPLETE
                       Reduce Output Operator
                         key expressions: _col0 (type: int)
                         sort order: +
                         Map-reduce partition columns: _col0 (type: int)
-                        Statistics: Num rows: 25 Data size: 3000 Basic stats: COMPLETE Column stats: COMPLETE
+                        Statistics: Num rows: 1 Data size: 120 Basic stats: COMPLETE Column stats: COMPLETE
                         value expressions: _col1 (type: decimal(10,2)), _col2 (type: int)
             Execution mode: llap
             LLAP IO: may be used (ACID table)
@@ -1325,7 +1328,7 @@ STAGE PLANS:
                   0 _col0 (type: int)
                   1 _col0 (type: int)
                 outputColumnNames: _col0, _col2, _col3
-                Statistics: Num rows: 41 Data size: 4920 Basic stats: COMPLETE Column stats: COMPLETE
+                Statistics: Num rows: 3 Data size: 360 Basic stats: COMPLETE Column stats: COMPLETE
                 Group By Operator
                   aggregations: sum(_col3)
                   keys: _col0 (type: int), _col2 (type: decimal(10,2))
@@ -1552,19 +1555,19 @@ STAGE PLANS:
                 TableScan
                   alias: cmv_basetable_2_n2
                   filterExpr: ((c > 10) and (ROW__ID.writeid > 4) and a is not null) (type: boolean)
-                  Statistics: Num rows: 91 Data size: 10920 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 3 Data size: 360 Basic stats: COMPLETE Column stats: COMPLETE
                   Filter Operator
                     predicate: ((ROW__ID.writeid > 4) and (c > 10) and a is not null) (type: boolean)
-                    Statistics: Num rows: 10 Data size: 1200 Basic stats: COMPLETE Column stats: COMPLETE
+                    Statistics: Num rows: 1 Data size: 120 Basic stats: COMPLETE Column stats: COMPLETE
                     Select Operator
                       expressions: a (type: int), c (type: decimal(10,2)), d (type: int)
                       outputColumnNames: _col0, _col1, _col2
-                      Statistics: Num rows: 10 Data size: 1960 Basic stats: COMPLETE Column stats: COMPLETE
+                      Statistics: Num rows: 1 Data size: 196 Basic stats: COMPLETE Column stats: COMPLETE
                       Reduce Output Operator
                         key expressions: _col0 (type: int)
                         sort order: +
                         Map-reduce partition columns: _col0 (type: int)
-                        Statistics: Num rows: 10 Data size: 1960 Basic stats: COMPLETE Column stats: COMPLETE
+                        Statistics: Num rows: 1 Data size: 196 Basic stats: COMPLETE Column stats: COMPLETE
                         value expressions: _col1 (type: decimal(10,2)), _col2 (type: int)
             Execution mode: llap
             LLAP IO: may be used (ACID table)
@@ -1662,7 +1665,7 @@ STAGE PLANS:
                   0 _col0 (type: int)
                   1 _col0 (type: int)
                 outputColumnNames: _col0, _col2, _col3
-                Statistics: Num rows: 16 Data size: 1920 Basic stats: COMPLETE Column stats: COMPLETE
+                Statistics: Num rows: 2 Data size: 240 Basic stats: COMPLETE Column stats: COMPLETE
                 Group By Operator
                   aggregations: sum(_col3)
                   keys: _col0 (type: int), _col2 (type: decimal(10,2))

http://git-wip-us.apache.org/repos/asf/hive/blob/798ff7d2/ql/src/test/results/clientpositive/llap/materialized_view_create_rewrite_5.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/materialized_view_create_rewrite_5.q.out b/ql/src/test/results/clientpositive/llap/materialized_view_create_rewrite_5.q.out
index 9abdcbb..fae4757 100644
--- a/ql/src/test/results/clientpositive/llap/materialized_view_create_rewrite_5.q.out
+++ b/ql/src/test/results/clientpositive/llap/materialized_view_create_rewrite_5.q.out
@@ -402,8 +402,11 @@ Retention:          	0
 #### A masked pattern was here ####
 Table Type:         	MATERIALIZED_VIEW   	 
 Table Parameters:	 	 
+	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"a\":\"true\",\"c\":\"true\"}}
 	bucketing_version   	2                   
 	numFiles            	2                   
+	numRows             	5                   
+	rawDataSize         	348                 
 	totalSize           	1071                
 	transactional       	true                
 	transactional_properties	default             
@@ -530,19 +533,19 @@ STAGE PLANS:
                 TableScan
                   alias: cmv_basetable_2_n3
                   filterExpr: ((c > 10) and a is not null) (type: boolean)
-                  Statistics: Num rows: 61 Data size: 7076 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 3 Data size: 348 Basic stats: COMPLETE Column stats: COMPLETE
                   Filter Operator
                     predicate: ((c > 10) and a is not null) (type: boolean)
-                    Statistics: Num rows: 20 Data size: 2320 Basic stats: COMPLETE Column stats: COMPLETE
+                    Statistics: Num rows: 1 Data size: 116 Basic stats: COMPLETE Column stats: COMPLETE
                     Select Operator
                       expressions: a (type: int), c (type: decimal(10,2))
                       outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 20 Data size: 2320 Basic stats: COMPLETE Column stats: COMPLETE
+                      Statistics: Num rows: 1 Data size: 116 Basic stats: COMPLETE Column stats: COMPLETE
                       Reduce Output Operator
                         key expressions: _col0 (type: int)
                         sort order: +
                         Map-reduce partition columns: _col0 (type: int)
-                        Statistics: Num rows: 20 Data size: 2320 Basic stats: COMPLETE Column stats: COMPLETE
+                        Statistics: Num rows: 1 Data size: 116 Basic stats: COMPLETE Column stats: COMPLETE
                         value expressions: _col1 (type: decimal(10,2))
             Execution mode: llap
             LLAP IO: may be used (ACID table)
@@ -556,14 +559,14 @@ STAGE PLANS:
                   0 _col0 (type: int)
                   1 _col0 (type: int)
                 outputColumnNames: _col0, _col2
-                Statistics: Num rows: 33 Data size: 3828 Basic stats: COMPLETE Column stats: COMPLETE
+                Statistics: Num rows: 2 Data size: 232 Basic stats: COMPLETE Column stats: COMPLETE
                 Select Operator
                   expressions: _col0 (type: int), _col2 (type: decimal(10,2))
                   outputColumnNames: _col0, _col1
-                  Statistics: Num rows: 33 Data size: 3828 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 2 Data size: 232 Basic stats: COMPLETE Column stats: COMPLETE
                   File Output Operator
                     compressed: false
-                    Statistics: Num rows: 33 Data size: 3828 Basic stats: COMPLETE Column stats: COMPLETE
+                    Statistics: Num rows: 2 Data size: 232 Basic stats: COMPLETE Column stats: COMPLETE
                     table:
                         input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
                         output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
@@ -573,7 +576,7 @@ STAGE PLANS:
                   Select Operator
                     expressions: _col0 (type: int), _col1 (type: decimal(10,2))
                     outputColumnNames: a, c
-                    Statistics: Num rows: 33 Data size: 3828 Basic stats: COMPLETE Column stats: COMPLETE
+                    Statistics: Num rows: 2 Data size: 232 Basic stats: COMPLETE Column stats: COMPLETE
                     Group By Operator
                       aggregations: compute_stats(a, 'hll'), compute_stats(c, 'hll')
                       mode: hash
@@ -741,19 +744,19 @@ STAGE PLANS:
                 TableScan
                   alias: cmv_basetable_2_n3
                   filterExpr: ((c > 10) and a is not null) (type: boolean)
-                  Statistics: Num rows: 75 Data size: 8700 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 2 Data size: 232 Basic stats: COMPLETE Column stats: COMPLETE
                   Filter Operator
                     predicate: ((c > 10) and a is not null) (type: boolean)
-                    Statistics: Num rows: 25 Data size: 2900 Basic stats: COMPLETE Column stats: COMPLETE
+                    Statistics: Num rows: 1 Data size: 116 Basic stats: COMPLETE Column stats: COMPLETE
                     Select Operator
                       expressions: a (type: int), c (type: decimal(10,2))
                       outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 25 Data size: 2900 Basic stats: COMPLETE Column stats: COMPLETE
+                      Statistics: Num rows: 1 Data size: 116 Basic stats: COMPLETE Column stats: COMPLETE
                       Reduce Output Operator
                         key expressions: _col0 (type: int)
                         sort order: +
                         Map-reduce partition columns: _col0 (type: int)
-                        Statistics: Num rows: 25 Data size: 2900 Basic stats: COMPLETE Column stats: COMPLETE
+                        Statistics: Num rows: 1 Data size: 116 Basic stats: COMPLETE Column stats: COMPLETE
                         value expressions: _col1 (type: decimal(10,2))
             Execution mode: llap
             LLAP IO: may be used (ACID table)
@@ -767,14 +770,14 @@ STAGE PLANS:
                   0 _col0 (type: int)
                   1 _col0 (type: int)
                 outputColumnNames: _col0, _col2
-                Statistics: Num rows: 41 Data size: 4756 Basic stats: COMPLETE Column stats: COMPLETE
+                Statistics: Num rows: 3 Data size: 348 Basic stats: COMPLETE Column stats: COMPLETE
                 Select Operator
                   expressions: _col0 (type: int), _col2 (type: decimal(10,2))
                   outputColumnNames: _col0, _col1
-                  Statistics: Num rows: 41 Data size: 4756 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 3 Data size: 348 Basic stats: COMPLETE Column stats: COMPLETE
                   File Output Operator
                     compressed: false
-                    Statistics: Num rows: 41 Data size: 4756 Basic stats: COMPLETE Column stats: COMPLETE
+                    Statistics: Num rows: 3 Data size: 348 Basic stats: COMPLETE Column stats: COMPLETE
                     table:
                         input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
                         output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
@@ -784,7 +787,7 @@ STAGE PLANS:
                   Select Operator
                     expressions: _col0 (type: int), _col1 (type: decimal(10,2))
                     outputColumnNames: a, c
-                    Statistics: Num rows: 41 Data size: 4756 Basic stats: COMPLETE Column stats: COMPLETE
+                    Statistics: Num rows: 3 Data size: 348 Basic stats: COMPLETE Column stats: COMPLETE
                     Group By Operator
                       aggregations: compute_stats(a, 'hll'), compute_stats(c, 'hll')
                       mode: hash
@@ -956,19 +959,19 @@ STAGE PLANS:
                 TableScan
                   alias: cmv_basetable_2_n3
                   filterExpr: ((c > 10) and (ROW__ID.writeid > 4) and a is not null) (type: boolean)
-                  Statistics: Num rows: 91 Data size: 10556 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 3 Data size: 348 Basic stats: COMPLETE Column stats: COMPLETE
                   Filter Operator
                     predicate: ((ROW__ID.writeid > 4) and (c > 10) and a is not null) (type: boolean)
-                    Statistics: Num rows: 10 Data size: 1160 Basic stats: COMPLETE Column stats: COMPLETE
+                    Statistics: Num rows: 1 Data size: 116 Basic stats: COMPLETE Column stats: COMPLETE
                     Select Operator
                       expressions: a (type: int), c (type: decimal(10,2))
                       outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 10 Data size: 1920 Basic stats: COMPLETE Column stats: COMPLETE
+                      Statistics: Num rows: 1 Data size: 192 Basic stats: COMPLETE Column stats: COMPLETE
                       Reduce Output Operator
                         key expressions: _col0 (type: int)
                         sort order: +
                         Map-reduce partition columns: _col0 (type: int)
-                        Statistics: Num rows: 10 Data size: 1920 Basic stats: COMPLETE Column stats: COMPLETE
+                        Statistics: Num rows: 1 Data size: 192 Basic stats: COMPLETE Column stats: COMPLETE
                         value expressions: _col1 (type: decimal(10,2))
             Execution mode: llap
             LLAP IO: may be used (ACID table)
@@ -982,14 +985,14 @@ STAGE PLANS:
                   0 _col0 (type: int)
                   1 _col0 (type: int)
                 outputColumnNames: _col0, _col2
-                Statistics: Num rows: 16 Data size: 1856 Basic stats: COMPLETE Column stats: COMPLETE
+                Statistics: Num rows: 2 Data size: 232 Basic stats: COMPLETE Column stats: COMPLETE
                 Select Operator
                   expressions: _col0 (type: int), _col2 (type: decimal(10,2))
                   outputColumnNames: _col0, _col1
-                  Statistics: Num rows: 16 Data size: 1856 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 2 Data size: 232 Basic stats: COMPLETE Column stats: COMPLETE
                   File Output Operator
                     compressed: false
-                    Statistics: Num rows: 16 Data size: 1856 Basic stats: COMPLETE Column stats: COMPLETE
+                    Statistics: Num rows: 2 Data size: 232 Basic stats: COMPLETE Column stats: COMPLETE
                     table:
                         input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
                         output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
@@ -999,7 +1002,7 @@ STAGE PLANS:
                   Select Operator
                     expressions: _col0 (type: int), _col1 (type: decimal(10,2))
                     outputColumnNames: a, c
-                    Statistics: Num rows: 16 Data size: 1856 Basic stats: COMPLETE Column stats: COMPLETE
+                    Statistics: Num rows: 2 Data size: 232 Basic stats: COMPLETE Column stats: COMPLETE
                     Group By Operator
                       aggregations: compute_stats(a, 'hll'), compute_stats(c, 'hll')
                       mode: hash

http://git-wip-us.apache.org/repos/asf/hive/blob/798ff7d2/ql/src/test/results/clientpositive/llap/materialized_view_create_rewrite_rebuild_dummy.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/materialized_view_create_rewrite_rebuild_dummy.q.out b/ql/src/test/results/clientpositive/llap/materialized_view_create_rewrite_rebuild_dummy.q.out
index 210db2c..fe54771 100644
--- a/ql/src/test/results/clientpositive/llap/materialized_view_create_rewrite_rebuild_dummy.q.out
+++ b/ql/src/test/results/clientpositive/llap/materialized_view_create_rewrite_rebuild_dummy.q.out
@@ -759,19 +759,19 @@ STAGE PLANS:
                 TableScan
                   alias: cmv_basetable_2_n0
                   filterExpr: ((c > 10.1) and a is not null) (type: boolean)
-                  Statistics: Num rows: 46 Data size: 5336 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 1 Data size: 116 Basic stats: COMPLETE Column stats: COMPLETE
                   Filter Operator
                     predicate: ((c > 10.1) and a is not null) (type: boolean)
-                    Statistics: Num rows: 15 Data size: 1740 Basic stats: COMPLETE Column stats: COMPLETE
+                    Statistics: Num rows: 1 Data size: 116 Basic stats: COMPLETE Column stats: COMPLETE
                     Select Operator
                       expressions: a (type: int), c (type: decimal(10,2))
                       outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 15 Data size: 1740 Basic stats: COMPLETE Column stats: COMPLETE
+                      Statistics: Num rows: 1 Data size: 116 Basic stats: COMPLETE Column stats: COMPLETE
                       Reduce Output Operator
                         key expressions: _col0 (type: int)
                         sort order: +
                         Map-reduce partition columns: _col0 (type: int)
-                        Statistics: Num rows: 15 Data size: 1740 Basic stats: COMPLETE Column stats: COMPLETE
+                        Statistics: Num rows: 1 Data size: 116 Basic stats: COMPLETE Column stats: COMPLETE
                         value expressions: _col1 (type: decimal(10,2))
             Execution mode: llap
             LLAP IO: may be used (ACID table)
@@ -785,17 +785,17 @@ STAGE PLANS:
                   0 _col0 (type: int)
                   1 _col0 (type: int)
                 outputColumnNames: _col0, _col2
-                Statistics: Num rows: 25 Data size: 2900 Basic stats: COMPLETE Column stats: COMPLETE
+                Statistics: Num rows: 5 Data size: 580 Basic stats: COMPLETE Column stats: COMPLETE
                 Group By Operator
                   keys: _col0 (type: int), _col2 (type: decimal(10,2))
                   mode: hash
                   outputColumnNames: _col0, _col1
-                  Statistics: Num rows: 1 Data size: 116 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 2 Data size: 232 Basic stats: COMPLETE Column stats: COMPLETE
                   Reduce Output Operator
                     key expressions: _col0 (type: int), _col1 (type: decimal(10,2))
                     sort order: ++
                     Map-reduce partition columns: _col0 (type: int), _col1 (type: decimal(10,2))
-                    Statistics: Num rows: 1 Data size: 116 Basic stats: COMPLETE Column stats: COMPLETE
+                    Statistics: Num rows: 2 Data size: 232 Basic stats: COMPLETE Column stats: COMPLETE
         Reducer 3 
             Execution mode: llap
             Reduce Operator Tree:
@@ -803,14 +803,14 @@ STAGE PLANS:
                 keys: KEY._col0 (type: int), KEY._col1 (type: decimal(10,2))
                 mode: mergepartial
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 1 Data size: 116 Basic stats: COMPLETE Column stats: COMPLETE
+                Statistics: Num rows: 2 Data size: 232 Basic stats: COMPLETE Column stats: COMPLETE
                 Select Operator
                   expressions: _col0 (type: int)
                   outputColumnNames: _col0
-                  Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
                   File Output Operator
                     compressed: false
-                    Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
+                    Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
                     table:
                         input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                         output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@@ -1041,19 +1041,19 @@ STAGE PLANS:
                 TableScan
                   alias: cmv_basetable_2_n0
                   filterExpr: ((c > 10.1) and a is not null) (type: boolean)
-                  Statistics: Num rows: 46 Data size: 5336 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 1 Data size: 116 Basic stats: COMPLETE Column stats: COMPLETE
                   Filter Operator
                     predicate: ((c > 10.1) and a is not null) (type: boolean)
-                    Statistics: Num rows: 15 Data size: 1740 Basic stats: COMPLETE Column stats: COMPLETE
+                    Statistics: Num rows: 1 Data size: 116 Basic stats: COMPLETE Column stats: COMPLETE
                     Select Operator
                       expressions: a (type: int), c (type: decimal(10,2))
                       outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 15 Data size: 1740 Basic stats: COMPLETE Column stats: COMPLETE
+                      Statistics: Num rows: 1 Data size: 116 Basic stats: COMPLETE Column stats: COMPLETE
                       Reduce Output Operator
                         key expressions: _col0 (type: int)
                         sort order: +
                         Map-reduce partition columns: _col0 (type: int)
-                        Statistics: Num rows: 15 Data size: 1740 Basic stats: COMPLETE Column stats: COMPLETE
+                        Statistics: Num rows: 1 Data size: 116 Basic stats: COMPLETE Column stats: COMPLETE
                         value expressions: _col1 (type: decimal(10,2))
             Execution mode: llap
             LLAP IO: may be used (ACID table)
@@ -1067,17 +1067,17 @@ STAGE PLANS:
                   0 _col0 (type: int)
                   1 _col0 (type: int)
                 outputColumnNames: _col0, _col2
-                Statistics: Num rows: 25 Data size: 2900 Basic stats: COMPLETE Column stats: COMPLETE
+                Statistics: Num rows: 5 Data size: 580 Basic stats: COMPLETE Column stats: COMPLETE
                 Group By Operator
                   keys: _col0 (type: int), _col2 (type: decimal(10,2))
                   mode: hash
                   outputColumnNames: _col0, _col1
-                  Statistics: Num rows: 1 Data size: 116 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 2 Data size: 232 Basic stats: COMPLETE Column stats: COMPLETE
                   Reduce Output Operator
                     key expressions: _col0 (type: int), _col1 (type: decimal(10,2))
                     sort order: ++
                     Map-reduce partition columns: _col0 (type: int), _col1 (type: decimal(10,2))
-                    Statistics: Num rows: 1 Data size: 116 Basic stats: COMPLETE Column stats: COMPLETE
+                    Statistics: Num rows: 2 Data size: 232 Basic stats: COMPLETE Column stats: COMPLETE
         Reducer 3 
             Execution mode: llap
             Reduce Operator Tree:
@@ -1085,14 +1085,14 @@ STAGE PLANS:
                 keys: KEY._col0 (type: int), KEY._col1 (type: decimal(10,2))
                 mode: mergepartial
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 1 Data size: 116 Basic stats: COMPLETE Column stats: COMPLETE
+                Statistics: Num rows: 2 Data size: 232 Basic stats: COMPLETE Column stats: COMPLETE
                 Select Operator
                   expressions: _col0 (type: int)
                   outputColumnNames: _col0
-                  Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
                   File Output Operator
                     compressed: false
-                    Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
+                    Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
                     table:
                         input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                         output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat

http://git-wip-us.apache.org/repos/asf/hive/blob/798ff7d2/ql/src/test/results/clientpositive/llap/mm_all.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/mm_all.q.out b/ql/src/test/results/clientpositive/llap/mm_all.q.out
index 95734b6..500c7fa 100644
--- a/ql/src/test/results/clientpositive/llap/mm_all.q.out
+++ b/ql/src/test/results/clientpositive/llap/mm_all.q.out
@@ -1815,6 +1815,7 @@ Retention:          	0
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
 Table Parameters:	 	 
+	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\"}}
 	bucketing_version   	2                   
 	numFiles            	3                   
 	numRows             	6                   
@@ -1865,6 +1866,7 @@ Retention:          	0
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
 Table Parameters:	 	 
+	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\"}}
 	bucketing_version   	2                   
 	numFiles            	6                   
 	numRows             	12                  
@@ -1923,7 +1925,7 @@ Retention:          	0
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
 Table Parameters:	 	 
-	COLUMN_STATS_ACCURATE	{}                  
+	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\"}
 	bucketing_version   	2                   
 	numFiles            	55                  
 	numRows             	500                 

http://git-wip-us.apache.org/repos/asf/hive/blob/798ff7d2/ql/src/test/results/clientpositive/llap/mm_exim.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/mm_exim.q.out b/ql/src/test/results/clientpositive/llap/mm_exim.q.out
index 37d3952..ee6cf06 100644
--- a/ql/src/test/results/clientpositive/llap/mm_exim.q.out
+++ b/ql/src/test/results/clientpositive/llap/mm_exim.q.out
@@ -386,6 +386,7 @@ Retention:          	0
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
 Table Parameters:	 	 
+	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\"}
 	bucketing_version   	2                   
 	numFiles            	3                   
 	numPartitions       	3                   
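
The Table Parameters hunks above all add or correct stats metadata (COLUMN_STATS_ACCURATE, numRows, rawDataSize). A minimal sketch of inspecting those parameters on a live table over JDBC; the connection URL and table name are placeholders, not taken from the patch:

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;

public class ShowTableParams {
  public static void main(String[] args) throws Exception {
    // Requires the hive-jdbc driver on the classpath; URL and table are placeholders.
    try (Connection con = DriverManager.getConnection("jdbc:hive2://localhost:10000/default");
         Statement st = con.createStatement();
         ResultSet rs = st.executeQuery("DESCRIBE FORMATTED some_mm_table")) {
      while (rs.next()) {
        // DESCRIBE FORMATTED yields three string columns; the Table Parameters
        // section carries COLUMN_STATS_ACCURATE, numRows, rawDataSize, totalSize.
        System.out.printf("%s\t%s\t%s%n", rs.getString(1), rs.getString(2), rs.getString(3));
      }
    }
  }
}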

http://git-wip-us.apache.org/repos/asf/hive/blob/798ff7d2/ql/src/test/results/clientpositive/llap/results_cache_invalidation.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/results_cache_invalidation.q.out b/ql/src/test/results/clientpositive/llap/results_cache_invalidation.q.out
index 7efb50a..d4b55bb 100644
--- a/ql/src/test/results/clientpositive/llap/results_cache_invalidation.q.out
+++ b/ql/src/test/results/clientpositive/llap/results_cache_invalidation.q.out
@@ -58,20 +58,20 @@ STAGE PLANS:
                 TableScan
                   alias: a
                   filterExpr: (UDFToDouble(key) >= 0.0D) (type: boolean)
-                  Statistics: Num rows: 91 Data size: 16192 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
                   Filter Operator
                     predicate: (UDFToDouble(key) >= 0.0D) (type: boolean)
-                    Statistics: Num rows: 30 Data size: 5338 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 166 Data size: 14442 Basic stats: COMPLETE Column stats: COMPLETE
                     Select Operator
-                      Statistics: Num rows: 30 Data size: 5338 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 166 Data size: 14442 Basic stats: COMPLETE Column stats: COMPLETE
                       Group By Operator
                         aggregations: count()
                         mode: hash
                         outputColumnNames: _col0
-                        Statistics: Num rows: 1 Data size: 192 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
                         Reduce Output Operator
                           sort order: 
-                          Statistics: Num rows: 1 Data size: 192 Basic stats: COMPLETE Column stats: NONE
+                          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
                           value expressions: _col0 (type: bigint)
             Execution mode: vectorized, llap
             LLAP IO: may be used (ACID table)
@@ -82,10 +82,10 @@ STAGE PLANS:
                 aggregations: count(VALUE._col0)
                 mode: mergepartial
                 outputColumnNames: _col0
-                Statistics: Num rows: 1 Data size: 192 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
                 File Output Operator
                   compressed: false
-                  Statistics: Num rows: 1 Data size: 192 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
                   table:
                       input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                       output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@@ -128,19 +128,19 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: tab2_n5
-                  Statistics: Num rows: 91 Data size: 16192 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
                   Select Operator
                     expressions: key (type: string)
                     outputColumnNames: key
-                    Statistics: Num rows: 91 Data size: 16192 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
                     Group By Operator
                       aggregations: max(key)
                       mode: hash
                       outputColumnNames: _col0
-                      Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: COMPLETE
                       Reduce Output Operator
                         sort order: 
-                        Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: COMPLETE
                         value expressions: _col0 (type: string)
             Execution mode: vectorized, llap
             LLAP IO: may be used (ACID table)
@@ -151,10 +151,10 @@ STAGE PLANS:
                 aggregations: max(VALUE._col0)
                 mode: mergepartial
                 outputColumnNames: _col0
-                Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: COMPLETE
                 File Output Operator
                   compressed: false
-                  Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: COMPLETE
                   table:
                       input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                       output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@@ -199,19 +199,19 @@ STAGE PLANS:
                 TableScan
                   alias: tab1_n6
                   filterExpr: key is not null (type: boolean)
-                  Statistics: Num rows: 91 Data size: 16192 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
                   Filter Operator
                     predicate: key is not null (type: boolean)
-                    Statistics: Num rows: 87 Data size: 15480 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
                     Select Operator
                       expressions: key (type: string)
                       outputColumnNames: _col0
-                      Statistics: Num rows: 87 Data size: 15480 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
                       Reduce Output Operator
                         key expressions: _col0 (type: string)
                         sort order: +
                         Map-reduce partition columns: _col0 (type: string)
-                        Statistics: Num rows: 87 Data size: 15480 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
             Execution mode: vectorized, llap
             LLAP IO: may be used (ACID table)
         Map 4 
@@ -219,19 +219,19 @@ STAGE PLANS:
                 TableScan
                   alias: tab2_n5
                   filterExpr: key is not null (type: boolean)
-                  Statistics: Num rows: 91 Data size: 16192 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
                   Filter Operator
                     predicate: key is not null (type: boolean)
-                    Statistics: Num rows: 87 Data size: 15480 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
                     Select Operator
                       expressions: key (type: string)
                       outputColumnNames: _col0
-                      Statistics: Num rows: 87 Data size: 15480 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
                       Reduce Output Operator
                         key expressions: _col0 (type: string)
                         sort order: +
                         Map-reduce partition columns: _col0 (type: string)
-                        Statistics: Num rows: 87 Data size: 15480 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
             Execution mode: vectorized, llap
             LLAP IO: may be used (ACID table)
         Reducer 2 
@@ -243,15 +243,15 @@ STAGE PLANS:
                 keys:
                   0 _col0 (type: string)
                   1 _col0 (type: string)
-                Statistics: Num rows: 95 Data size: 17028 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 791 Data size: 6328 Basic stats: COMPLETE Column stats: COMPLETE
                 Group By Operator
                   aggregations: count()
                   mode: hash
                   outputColumnNames: _col0
-                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
                   Reduce Output Operator
                     sort order: 
-                    Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
                     value expressions: _col0 (type: bigint)
         Reducer 3 
             Execution mode: vectorized, llap
@@ -260,10 +260,10 @@ STAGE PLANS:
                 aggregations: count(VALUE._col0)
                 mode: mergepartial
                 outputColumnNames: _col0
-                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
                 File Output Operator
                   compressed: false
-                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
                   table:
                       input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                       output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@@ -397,20 +397,20 @@ STAGE PLANS:
                 TableScan
                   alias: a
                   filterExpr: (UDFToDouble(key) >= 0.0D) (type: boolean)
-                  Statistics: Num rows: 111 Data size: 19688 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 501 Data size: 43587 Basic stats: COMPLETE Column stats: COMPLETE
                   Filter Operator
                     predicate: (UDFToDouble(key) >= 0.0D) (type: boolean)
-                    Statistics: Num rows: 37 Data size: 6562 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 167 Data size: 14529 Basic stats: COMPLETE Column stats: COMPLETE
                     Select Operator
-                      Statistics: Num rows: 37 Data size: 6562 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 167 Data size: 14529 Basic stats: COMPLETE Column stats: COMPLETE
                       Group By Operator
                         aggregations: count()
                         mode: hash
                         outputColumnNames: _col0
-                        Statistics: Num rows: 1 Data size: 192 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
                         Reduce Output Operator
                           sort order: 
-                          Statistics: Num rows: 1 Data size: 192 Basic stats: COMPLETE Column stats: NONE
+                          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
                           value expressions: _col0 (type: bigint)
             Execution mode: vectorized, llap
             LLAP IO: may be used (ACID table)
@@ -421,10 +421,10 @@ STAGE PLANS:
                 aggregations: count(VALUE._col0)
                 mode: mergepartial
                 outputColumnNames: _col0
-                Statistics: Num rows: 1 Data size: 192 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
                 File Output Operator
                   compressed: false
-                  Statistics: Num rows: 1 Data size: 192 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
                   table:
                       input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                       output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@@ -495,19 +495,19 @@ STAGE PLANS:
                 TableScan
                   alias: tab1_n6
                   filterExpr: key is not null (type: boolean)
-                  Statistics: Num rows: 111 Data size: 19688 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 501 Data size: 43587 Basic stats: COMPLETE Column stats: COMPLETE
                   Filter Operator
                     predicate: key is not null (type: boolean)
-                    Statistics: Num rows: 106 Data size: 18801 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 501 Data size: 43587 Basic stats: COMPLETE Column stats: COMPLETE
                     Select Operator
                       expressions: key (type: string)
                       outputColumnNames: _col0
-                      Statistics: Num rows: 106 Data size: 18801 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 501 Data size: 43587 Basic stats: COMPLETE Column stats: COMPLETE
                       Reduce Output Operator
                         key expressions: _col0 (type: string)
                         sort order: +
                         Map-reduce partition columns: _col0 (type: string)
-                        Statistics: Num rows: 106 Data size: 18801 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 501 Data size: 43587 Basic stats: COMPLETE Column stats: COMPLETE
             Execution mode: vectorized, llap
             LLAP IO: may be used (ACID table)
         Map 4 
@@ -515,19 +515,19 @@ STAGE PLANS:
                 TableScan
                   alias: tab2_n5
                   filterExpr: key is not null (type: boolean)
-                  Statistics: Num rows: 91 Data size: 16192 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
                   Filter Operator
                     predicate: key is not null (type: boolean)
-                    Statistics: Num rows: 87 Data size: 15480 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
                     Select Operator
                       expressions: key (type: string)
                       outputColumnNames: _col0
-                      Statistics: Num rows: 87 Data size: 15480 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
                       Reduce Output Operator
                         key expressions: _col0 (type: string)
                         sort order: +
                         Map-reduce partition columns: _col0 (type: string)
-                        Statistics: Num rows: 87 Data size: 15480 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
             Execution mode: vectorized, llap
             LLAP IO: may be used (ACID table)
         Reducer 2 
@@ -539,15 +539,15 @@ STAGE PLANS:
                 keys:
                   0 _col0 (type: string)
                   1 _col0 (type: string)
-                Statistics: Num rows: 116 Data size: 20681 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 792 Data size: 6336 Basic stats: COMPLETE Column stats: COMPLETE
                 Group By Operator
                   aggregations: count()
                   mode: hash
                   outputColumnNames: _col0
-                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
                   Reduce Output Operator
                     sort order: 
-                    Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
                     value expressions: _col0 (type: bigint)
         Reducer 3 
             Execution mode: vectorized, llap
@@ -556,10 +556,10 @@ STAGE PLANS:
                 aggregations: count(VALUE._col0)
                 mode: mergepartial
                 outputColumnNames: _col0
-                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
                 File Output Operator
                   compressed: false
-                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
                   table:
                       input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                       output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@@ -640,19 +640,19 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: tab2_n5
-                  Statistics: Num rows: 111 Data size: 19688 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 501 Data size: 43587 Basic stats: COMPLETE Column stats: COMPLETE
                   Select Operator
                     expressions: key (type: string)
                     outputColumnNames: key
-                    Statistics: Num rows: 111 Data size: 19688 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 501 Data size: 43587 Basic stats: COMPLETE Column stats: COMPLETE
                     Group By Operator
                       aggregations: max(key)
                       mode: hash
                       outputColumnNames: _col0
-                      Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: COMPLETE
                       Reduce Output Operator
                         sort order: 
-                        Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: COMPLETE
                         value expressions: _col0 (type: string)
             Execution mode: vectorized, llap
             LLAP IO: may be used (ACID table)
@@ -663,10 +663,10 @@ STAGE PLANS:
                 aggregations: max(VALUE._col0)
                 mode: mergepartial
                 outputColumnNames: _col0
-                Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: COMPLETE
                 File Output Operator
                   compressed: false
-                  Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: COMPLETE
                   table:
                       input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                       output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@@ -712,19 +712,19 @@ STAGE PLANS:
                 TableScan
                   alias: tab1_n6
                   filterExpr: key is not null (type: boolean)
-                  Statistics: Num rows: 111 Data size: 19688 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 501 Data size: 43587 Basic stats: COMPLETE Column stats: COMPLETE
                   Filter Operator
                     predicate: key is not null (type: boolean)
-                    Statistics: Num rows: 106 Data size: 18801 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 501 Data size: 43587 Basic stats: COMPLETE Column stats: COMPLETE
                     Select Operator
                       expressions: key (type: string)
                       outputColumnNames: _col0
-                      Statistics: Num rows: 106 Data size: 18801 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 501 Data size: 43587 Basic stats: COMPLETE Column stats: COMPLETE
                       Reduce Output Operator
                         key expressions: _col0 (type: string)
                         sort order: +
                         Map-reduce partition columns: _col0 (type: string)
-                        Statistics: Num rows: 106 Data size: 18801 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 501 Data size: 43587 Basic stats: COMPLETE Column stats: COMPLETE
             Execution mode: vectorized, llap
             LLAP IO: may be used (ACID table)
         Map 4 
@@ -732,19 +732,19 @@ STAGE PLANS:
                 TableScan
                   alias: tab2_n5
                   filterExpr: key is not null (type: boolean)
-                  Statistics: Num rows: 111 Data size: 19688 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 501 Data size: 43587 Basic stats: COMPLETE Column stats: COMPLETE
                   Filter Operator
                     predicate: key is not null (type: boolean)
-                    Statistics: Num rows: 106 Data size: 18801 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 501 Data size: 43587 Basic stats: COMPLETE Column stats: COMPLETE
                     Select Operator
                       expressions: key (type: string)
                       outputColumnNames: _col0
-                      Statistics: Num rows: 106 Data size: 18801 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 501 Data size: 43587 Basic stats: COMPLETE Column stats: COMPLETE
                       Reduce Output Operator
                         key expressions: _col0 (type: string)
                         sort order: +
                         Map-reduce partition columns: _col0 (type: string)
-                        Statistics: Num rows: 106 Data size: 18801 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 501 Data size: 43587 Basic stats: COMPLETE Column stats: COMPLETE
             Execution mode: vectorized, llap
             LLAP IO: may be used (ACID table)
         Reducer 2 
@@ -756,15 +756,15 @@ STAGE PLANS:
                 keys:
                   0 _col0 (type: string)
                   1 _col0 (type: string)
-                Statistics: Num rows: 116 Data size: 20681 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 794 Data size: 6352 Basic stats: COMPLETE Column stats: COMPLETE
                 Group By Operator
                   aggregations: count()
                   mode: hash
                   outputColumnNames: _col0
-                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
                   Reduce Output Operator
                     sort order: 
-                    Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
                     value expressions: _col0 (type: bigint)
         Reducer 3 
             Execution mode: vectorized, llap
@@ -773,10 +773,10 @@ STAGE PLANS:
                 aggregations: count(VALUE._col0)
                 mode: mergepartial
                 outputColumnNames: _col0
-                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
                 File Output Operator
                   compressed: false
-                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
                   table:
                       input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                       output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
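
The Statistics: lines changing from "Column stats: NONE" to "COMPLETE" in these hunks are operator annotations in EXPLAIN output. A minimal sketch, assuming a reachable HiveServer2 (the URL is a placeholder; the table names match the test), of pulling just those lines from a plan:

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;

public class FetchPlanStatistics {
  public static void main(String[] args) throws Exception {
    try (Connection con = DriverManager.getConnection("jdbc:hive2://localhost:10000/default");
         Statement st = con.createStatement();
         ResultSet rs = st.executeQuery(
             "EXPLAIN SELECT count(*) FROM tab1_n6 a JOIN tab2_n5 b ON (a.key = b.key)")) {
      while (rs.next()) {
        String line = rs.getString(1);
        // Keep only lines like "Statistics: Num rows: 500 ... Column stats: COMPLETE".
        if (line != null && line.contains("Statistics:")) {
          System.out.println(line.trim());
        }
      }
    }
  }
}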


[05/13] hive git commit: HIVE-19888: Misleading "METASTORE_FILTER_HOOK will be ignored" warning from SessionState (Marcelo Vanzin via Zoltan Haindrich)

Posted by se...@apache.org.
HIVE-19888: Misleading "METASTORE_FILTER_HOOK will be ignored" warning from SessionState (Marcelo Vanzin via Zoltan Haindrich)

Signed-off-by: Zoltan Haindrich <ki...@rxd.hu>


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/4abc64c9
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/4abc64c9
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/4abc64c9

Branch: refs/heads/master-txnstats
Commit: 4abc64c9c0e07e219d32c7edf09447b101076e28
Parents: d1fb780
Author: Marcelo Vanzin <va...@cloudera.com>
Authored: Tue Jun 26 09:34:04 2018 +0200
Committer: Zoltan Haindrich <ki...@rxd.hu>
Committed: Tue Jun 26 09:34:04 2018 +0200

----------------------------------------------------------------------
 ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/4abc64c9/ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java b/ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java
index 81864f5..6762ee6 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java
@@ -949,10 +949,10 @@ public class SessionState {
     if (sessionConf.get(CONFIG_AUTHZ_SETTINGS_APPLIED_MARKER, "").equals(Boolean.TRUE.toString())) {
       return;
     }
-    String metastoreHook = sessionConf.get(ConfVars.METASTORE_FILTER_HOOK.name());
+    String metastoreHook = sessionConf.getVar(ConfVars.METASTORE_FILTER_HOOK);
     if (!ConfVars.METASTORE_FILTER_HOOK.getDefaultValue().equals(metastoreHook) &&
         !AuthorizationMetaStoreFilterHook.class.getName().equals(metastoreHook)) {
-      LOG.warn(ConfVars.METASTORE_FILTER_HOOK.name() +
+      LOG.warn(ConfVars.METASTORE_FILTER_HOOK.varname +
           " will be ignored, since hive.security.authorization.manager" +
           " is set to instance of HiveAuthorizerFactory.");
     }
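
For context: HiveConf.ConfVars is a Java enum, so name() returns the enum
constant's own name rather than the configuration key the constant wraps.
A minimal sketch of the distinction (simplified model, not from this commit):

  enum ConfVars {
    METASTORE_FILTER_HOOK("hive.metastore.filter.hook");
    final String varname;
    ConfVars(String varname) { this.varname = varname; }
  }
  // conf.get(ConfVars.METASTORE_FILTER_HOOK.name()) looks up the literal
  // key "METASTORE_FILTER_HOOK", which nothing ever sets, so the old code
  // compared against null and logged the wrong key; getVar(...) and
  // .varname resolve the real key, "hive.metastore.filter.hook".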


[08/13] hive git commit: HIVE-19999 : Move precommit jobs to jdk 8 (Vihang Karajgaonkar)

Posted by se...@apache.org.
HIVE-19999 : Move precommit jobs to jdk 8 (Vihang Karajgaonkar)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/966b83e3
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/966b83e3
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/966b83e3

Branch: refs/heads/master-txnstats
Commit: 966b83e3b9123bb455572d47878601d60b86999e
Parents: 1154d08
Author: Vihang Karajgaonkar <vi...@cloudera.com>
Authored: Tue Jun 26 12:14:19 2018 -0700
Committer: Vihang Karajgaonkar <vi...@cloudera.com>
Committed: Tue Jun 26 12:15:28 2018 -0700

----------------------------------------------------------------------
 .../main/java/org/apache/hive/ptest/api/client/PTestClient.java    | 2 --
 1 file changed, 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/966b83e3/testutils/ptest2/src/main/java/org/apache/hive/ptest/api/client/PTestClient.java
----------------------------------------------------------------------
diff --git a/testutils/ptest2/src/main/java/org/apache/hive/ptest/api/client/PTestClient.java b/testutils/ptest2/src/main/java/org/apache/hive/ptest/api/client/PTestClient.java
index e878e18..fd84169 100644
--- a/testutils/ptest2/src/main/java/org/apache/hive/ptest/api/client/PTestClient.java
+++ b/testutils/ptest2/src/main/java/org/apache/hive/ptest/api/client/PTestClient.java
@@ -283,8 +283,6 @@ public class PTestClient {
     }
   }
   public static void main(String[] args) throws Exception {
-    //TODO This line can be removed once precommit jenkins jobs move to Java 8
-    System.setProperty("https.protocols", "TLSv1,TLSv1.1,TLSv1.2");
     CommandLineParser parser = new GnuParser();
     Options options = new Options();
     options.addOption(HELP_SHORT, HELP_LONG, false, "Display help text and exit");
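
The removed workaround mattered on Java 7, where client sockets enabled only
TLSv1 by default; Java 8 enables TLSv1.2 out of the box, so forcing
https.protocols becomes redundant once the jobs run on JDK 8. A standalone
check of the defaults on a given JDK (illustration only, not part of the
patch):

  import javax.net.ssl.SSLContext;

  public class TlsDefaults {
    public static void main(String[] args) throws Exception {
      // Prints the protocols a default-configured client will offer;
      // on JDK 8 this includes TLSv1.2 with no system properties set.
      SSLContext ctx = SSLContext.getDefault();
      for (String p : ctx.getDefaultSSLParameters().getProtocols()) {
        System.out.println(p);
      }
    }
  }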


[10/13] hive git commit: HIVE-19532 : fix tests for master-txnstats branch (Sergey Shelukhin)

Posted by se...@apache.org.
HIVE-19532 : fix tests for master-txnstats branch (Sergey Shelukhin)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/bdc256ed
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/bdc256ed
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/bdc256ed

Branch: refs/heads/master-txnstats
Commit: bdc256edeff716b655edc0604b8d0e9cd9fe8597
Parents: 3c21861
Author: sergey <se...@apache.org>
Authored: Tue Jun 26 18:12:41 2018 -0700
Committer: sergey <se...@apache.org>
Committed: Tue Jun 26 18:12:41 2018 -0700

----------------------------------------------------------------------
 .../hadoop/hive/ql/metadata/TestHive.java       |  7 ++++++
 .../hive/metastore/HiveMetaStoreClient.java     |  2 +-
 .../hadoop/hive/metastore/IMetaStoreClient.java |  2 +-
 .../hadoop/hive/metastore/ObjectStore.java      |  4 +--
 .../metastore/client/TestAlterPartitions.java   | 26 ++++++++++++++------
 5 files changed, 30 insertions(+), 11 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/bdc256ed/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHive.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHive.java b/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHive.java
index a24b642..930282d 100755
--- a/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHive.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHive.java
@@ -324,6 +324,13 @@ public class TestHive extends TestCase {
       tbl.setCreateTime(ft.getTTable().getCreateTime());
       tbl.getParameters().put(hive_metastoreConstants.DDL_TIME,
           ft.getParameters().get(hive_metastoreConstants.DDL_TIME));
+      // Txn stuff set by metastore
+      if (tbl.getTTable().isSetTxnId()) {
+        ft.getTTable().setTxnId(tbl.getTTable().getTxnId());
+      }
+      if (tbl.getTTable().isSetValidWriteIdList()) {
+        ft.getTTable().setValidWriteIdList(tbl.getTTable().getValidWriteIdList());
+      }
       assertTrue("Tables  doesn't match: " + tableName + " (" + ft.getTTable()
           + "; " + tbl.getTTable() + ")", ft.getTTable().equals(tbl.getTTable()));
       assertEquals("SerializationLib is not set correctly", tbl

http://git-wip-us.apache.org/repos/asf/hive/blob/bdc256ed/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
index 626e103..1e50ba7 100644
--- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
@@ -1886,7 +1886,7 @@ public class HiveMetaStoreClient implements IMetaStoreClient, AutoCloseable {
   public void alter_partitions(String dbName, String tblName, List<Partition> newParts)
       throws TException {
     alter_partitions(
-        getDefaultCatalog(conf), dbName, tblName, newParts, null, -1, null);
+        getDefaultCatalog(conf), dbName, tblName, newParts, new EnvironmentContext(), -1, null);
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/hive/blob/bdc256ed/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
index c4cd8b4..8bc3df5 100644
--- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
@@ -2155,7 +2155,7 @@ public interface IMetaStoreClient {
   default void alter_partitions(String catName, String dbName, String tblName,
                                 List<Partition> newParts)
       throws InvalidOperationException, MetaException, TException {
-    alter_partitions(catName, dbName, tblName, newParts, null,-1, null);
+    alter_partitions(catName, dbName, tblName, newParts, new EnvironmentContext(), -1, null);
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hive/blob/bdc256ed/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java
index 9266879..2c3554e 100644
--- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java
@@ -2381,6 +2381,7 @@ public class ObjectStore implements RawStore, Configurable {
     boolean commited = false;
 
     try {
+      openTransaction();
       String catName = part.isSetCatName() ? part.getCatName() : getDefaultCatalog(conf);
       MTable table = this.getMTable(catName, part.getDbName(), part.getTableName());
       List<MTablePrivilege> tabGrants = null;
@@ -2390,7 +2391,6 @@ public class ObjectStore implements RawStore, Configurable {
         tabColumnGrants = this.listTableAllColumnGrants(
             catName, part.getDbName(), part.getTableName());
       }
-      openTransaction();
       MPartition mpart = convertToMPart(part, table, true);
       pm.makePersistent(mpart);
 
@@ -4202,7 +4202,7 @@ public class ObjectStore implements RawStore, Configurable {
     catName = normalizeIdentifier(catName);
     name = normalizeIdentifier(name);
     dbname = normalizeIdentifier(dbname);
-    MTable table = this.getMTable(catName, dbname, name);
+    MTable table = this.getMTable(newPart.getCatName(), newPart.getDbName(), newPart.getTableName());
     MPartition oldp = getMPartition(catName, dbname, name, part_vals);
     MPartition newp = convertToMPart(newPart, table, false);
     MColumnDescriptor oldCD = null;
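
Two separate fixes in this file: openTransaction() now precedes the
getMTable/privilege reads so those reads join the same datastore transaction
as the makePersistent() write, and the second hunk resolves the MTable from
the new partition's own catalog/db/table names instead of the old
coordinates. The first follows ObjectStore's standard transaction idiom,
roughly (a condensed sketch, not the full method):

  boolean committed = false;
  try {
    openTransaction();                  // reads below share the write's txn
    MTable table = getMTable(catName, dbName, tableName);
    // ... privilege lookups, convertToMPart(...) ...
    pm.makePersistent(mpart);           // write in the same txn as the reads
    committed = commitTransaction();
  } finally {
    if (!committed) {
      rollbackTransaction();            // undo everything on any failure
    }
  }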

http://git-wip-us.apache.org/repos/asf/hive/blob/bdc256ed/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestAlterPartitions.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestAlterPartitions.java b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestAlterPartitions.java
index f19b505..0a62ac1 100644
--- a/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestAlterPartitions.java
+++ b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestAlterPartitions.java
@@ -18,6 +18,7 @@
 
 package org.apache.hadoop.hive.metastore.client;
 
+import java.net.ProtocolException;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collections;
@@ -41,6 +42,7 @@ import org.apache.hadoop.hive.metastore.client.builder.PartitionBuilder;
 import org.apache.hadoop.hive.metastore.client.builder.TableBuilder;
 import org.apache.hadoop.hive.metastore.minihms.AbstractMetaStoreService;
 import org.apache.thrift.TException;
+import org.apache.thrift.protocol.TProtocolException;
 import org.apache.thrift.transport.TTransportException;
 
 import com.google.common.collect.Lists;
@@ -692,11 +694,16 @@ public class TestAlterPartitions extends MetaStoreClientTest {
     client.alter_partitions(DB_NAME, "", Lists.newArrayList(part));
   }
 
-  @Test(expected = MetaException.class)
+  @Test
   public void testAlterPartitionsNullTblName() throws Exception {
     createTable4PartColsParts(client);
     Partition part = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1).get(0);
-    client.alter_partitions(DB_NAME, null, Lists.newArrayList(part));
+    try {
+      client.alter_partitions(DB_NAME, null, Lists.newArrayList(part));
+      Assert.fail("didn't throw");
+    } catch (TProtocolException | MetaException e) {
+      // By design
+    }
   }
 
   @Test(expected = NullPointerException.class)
@@ -720,7 +727,7 @@ public class TestAlterPartitions extends MetaStoreClientTest {
       Partition part = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1).get(0);
       client.alter_partitions(DB_NAME, TABLE_NAME, null);
       fail("Should have thrown exception");
-    } catch (NullPointerException | TTransportException e) {
+    } catch (NullPointerException | TTransportException | TProtocolException e) {
       //TODO: should not throw different exceptions for different HMS deployment types
     }
   }
@@ -786,7 +793,7 @@ public class TestAlterPartitions extends MetaStoreClientTest {
     assertPartitionsHaveCorrectValues(newParts, testValues);
 
     client.alter_partitions(DB_NAME, TABLE_NAME, newParts, new EnvironmentContext());
-    client.alter_partitions(DB_NAME, TABLE_NAME, newParts, null);
+    client.alter_partitions(DB_NAME, TABLE_NAME, newParts);
 
     for (int i = 0; i < testValues.size(); ++i) {
       assertPartitionChanged(oldParts.get(i), testValues.get(i), PARTCOL_SCHEMA);
@@ -860,11 +867,16 @@ public class TestAlterPartitions extends MetaStoreClientTest {
     client.alter_partitions(DB_NAME, "", Lists.newArrayList(part), new EnvironmentContext());
   }
 
-  @Test(expected = MetaException.class)
+  @Test
   public void testAlterPartitionsWithEnvironmentCtxNullTblName() throws Exception {
     createTable4PartColsParts(client);
     Partition part = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1).get(0);
-    client.alter_partitions(DB_NAME, null, Lists.newArrayList(part), new EnvironmentContext());
+    try {
+      client.alter_partitions(DB_NAME, null, Lists.newArrayList(part), new EnvironmentContext());
+      Assert.fail("didn't throw");
+    } catch (MetaException | TProtocolException ex) {
+      // By design.
+    }
   }
 
   @Test(expected = NullPointerException.class)
@@ -890,7 +902,7 @@ public class TestAlterPartitions extends MetaStoreClientTest {
       Partition part = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1).get(0);
       client.alter_partitions(DB_NAME, TABLE_NAME, null, new EnvironmentContext());
       fail("Should have thrown exception");
-    } catch (NullPointerException | TTransportException e) {
+    } catch (NullPointerException | TTransportException | TProtocolException e) {
       //TODO: should not throw different exceptions for different HMS deployment types
     }
   }
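
The test changes above acknowledge that a null table name fails differently
depending on how the metastore is deployed: an embedded HMS can raise
MetaException in-process, while a remote client may fail earlier, when the
Thrift layer validates or serializes the request. Hence the new pattern
(mirroring the updated test bodies; which exception fires is
deployment-specific):

  try {
    client.alter_partitions(DB_NAME, null, Lists.newArrayList(part));
    Assert.fail("didn't throw");
  } catch (TProtocolException | MetaException e) {
    // embedded HMS: MetaException; remote HMS: protocol-level failure
  }

The switch from a null EnvironmentContext to new EnvironmentContext() in the
client's default overloads looks like the same defensive idea: pass an empty
object rather than a null for downstream code to dereference.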


[07/13] hive git commit: HIVE-19997: Batches for TestMiniDruidCliDriver (Jesus Camacho Rodriguez, reviewed by Vihang Karajgaonkar)

Posted by se...@apache.org.
HIVE-19997: Batches for TestMiniDruidCliDriver (Jesus Camacho Rodriguez, reviewed by Vihang Karajgaonkar)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/1154d08a
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/1154d08a
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/1154d08a

Branch: refs/heads/master-txnstats
Commit: 1154d08a7e54933bb0e929a5af4e0d8a100f58d9
Parents: ee14e36
Author: Jesus Camacho Rodriguez <jc...@apache.org>
Authored: Tue Jun 26 08:34:15 2018 -0700
Committer: Jesus Camacho Rodriguez <jc...@apache.org>
Committed: Tue Jun 26 10:37:10 2018 -0700

----------------------------------------------------------------------
 testutils/ptest2/conf/deployed/master-mr2.properties | 7 +++++++
 1 file changed, 7 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/1154d08a/testutils/ptest2/conf/deployed/master-mr2.properties
----------------------------------------------------------------------
diff --git a/testutils/ptest2/conf/deployed/master-mr2.properties b/testutils/ptest2/conf/deployed/master-mr2.properties
index f04c0ce..90a654c 100644
--- a/testutils/ptest2/conf/deployed/master-mr2.properties
+++ b/testutils/ptest2/conf/deployed/master-mr2.properties
@@ -179,3 +179,10 @@ qFileTest.erasurecodingCli.batchSize = 15
 qFileTest.erasurecodingCli.queryFilesProperty = qfile
 qFileTest.erasurecodingCli.include = normal
 qFileTest.erasurecodingCli.groups.normal = mainProperties.${erasurecoding.only.query.files} mainProperties.${erasurecoding.shared.query.files}
+
+qFileTest.miniDruid.driver = TestMiniDruidCliDriver
+qFileTest.miniDruid.directory = ql/src/test/queries/clientpositive
+qFileTest.miniDruid.batchSize = 5
+qFileTest.miniDruid.queryFilesProperty = qfile
+qFileTest.miniDruid.include = normal
+qFileTest.miniDruid.groups.normal = mainProperties.${druid.query.files}
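
Read together, these properties register TestMiniDruidCliDriver with the
ptest batching framework: the qfiles named by ${druid.query.files} are split
into batches of five (batchSize = 5), and each batch runs as a separate
driver invocation, letting the Druid tests spread across executors instead
of running as one long serial driver.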


[02/13] hive git commit: HIVE-19481 : Tablesample uses incorrect logic to pick files corresponding to buckets. (Deepak Jaiswal, reviewed by Sergey Shelukhin)

Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/eaf416ea/ql/src/test/results/clientpositive/spark/sample6.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/sample6.q.out b/ql/src/test/results/clientpositive/spark/sample6.q.out
index 36532d7..153f0fd 100644
--- a/ql/src/test/results/clientpositive/spark/sample6.q.out
+++ b/ql/src/test/results/clientpositive/spark/sample6.q.out
@@ -78,7 +78,7 @@ STAGE PLANS:
             Path -> Partition:
 #### A masked pattern was here ####
                 Partition
-                  base file name: 000000_0
+                  base file name: srcbucket
                   input format: org.apache.hadoop.mapred.TextInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                   properties:
@@ -127,7 +127,7 @@ STAGE PLANS:
                     name: default.srcbucket
                   name: default.srcbucket
             Truncated Path -> Alias:
-              /srcbucket/000000_0 [s]
+              /srcbucket [s]
 
   Stage: Stage-0
     Move Operator
@@ -499,7 +499,7 @@ STAGE PLANS:
             Path -> Partition:
 #### A masked pattern was here ####
                 Partition
-                  base file name: 000001_0
+                  base file name: srcbucket
                   input format: org.apache.hadoop.mapred.TextInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                   properties:
@@ -548,7 +548,7 @@ STAGE PLANS:
                     name: default.srcbucket
                   name: default.srcbucket
             Truncated Path -> Alias:
-              /srcbucket/000001_0 [s]
+              /srcbucket [s]
         Reducer 2 
             Execution mode: vectorized
             Needs Tagging: false
@@ -913,7 +913,7 @@ STAGE PLANS:
             Path -> Partition:
 #### A masked pattern was here ####
                 Partition
-                  base file name: 000000_0
+                  base file name: srcbucket
                   input format: org.apache.hadoop.mapred.TextInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                   properties:
@@ -962,7 +962,7 @@ STAGE PLANS:
                     name: default.srcbucket
                   name: default.srcbucket
             Truncated Path -> Alias:
-              /srcbucket/000000_0 [s]
+              /srcbucket [s]
         Reducer 2 
             Execution mode: vectorized
             Needs Tagging: false
@@ -2528,57 +2528,7 @@ STAGE PLANS:
             Path -> Partition:
 #### A masked pattern was here ####
                 Partition
-                  base file name: 000000_0
-                  input format: org.apache.hadoop.mapred.TextInputFormat
-                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                  properties:
-                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
-                    bucket_count 4
-                    bucket_field_name key
-                    bucketing_version 2
-                    column.name.delimiter ,
-                    columns key,value
-                    columns.comments 
-                    columns.types int:string
-#### A masked pattern was here ####
-                    name default.srcbucket2
-                    numFiles 4
-                    numRows 500
-                    rawDataSize 5312
-                    serialization.ddl struct srcbucket2 { i32 key, string value}
-                    serialization.format 1
-                    serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                    totalSize 5812
-#### A masked pattern was here ####
-                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                
-                    input format: org.apache.hadoop.mapred.TextInputFormat
-                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                    properties:
-                      COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
-                      bucket_count 4
-                      bucket_field_name key
-                      bucketing_version 2
-                      column.name.delimiter ,
-                      columns key,value
-                      columns.comments 
-                      columns.types int:string
-#### A masked pattern was here ####
-                      name default.srcbucket2
-                      numFiles 4
-                      numRows 500
-                      rawDataSize 5312
-                      serialization.ddl struct srcbucket2 { i32 key, string value}
-                      serialization.format 1
-                      serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                      totalSize 5812
-#### A masked pattern was here ####
-                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                    name: default.srcbucket2
-                  name: default.srcbucket2
-#### A masked pattern was here ####
-                Partition
-                  base file name: 000002_0
+                  base file name: srcbucket2
                   input format: org.apache.hadoop.mapred.TextInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                   properties:
@@ -2627,8 +2577,7 @@ STAGE PLANS:
                     name: default.srcbucket2
                   name: default.srcbucket2
             Truncated Path -> Alias:
-              /srcbucket2/000000_0 [s]
-              /srcbucket2/000002_0 [s]
+              /srcbucket2 [s]
         Reducer 2 
             Execution mode: vectorized
             Needs Tagging: false
@@ -2964,7 +2913,7 @@ STAGE PLANS:
             Path -> Partition:
 #### A masked pattern was here ####
                 Partition
-                  base file name: 000001_0
+                  base file name: srcbucket2
                   input format: org.apache.hadoop.mapred.TextInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                   properties:
@@ -3013,7 +2962,7 @@ STAGE PLANS:
                     name: default.srcbucket2
                   name: default.srcbucket2
             Truncated Path -> Alias:
-              /srcbucket2/000001_0 [s]
+              /srcbucket2 [s]
         Reducer 2 
             Execution mode: vectorized
             Needs Tagging: false
@@ -3235,6 +3184,61 @@ STAGE PLANS:
                         tag: -1
                         auto parallelism: false
             Execution mode: vectorized
+            Path -> Alias:
+#### A masked pattern was here ####
+            Path -> Partition:
+#### A masked pattern was here ####
+                Partition
+                  base file name: empty_bucket
+                  input format: org.apache.hadoop.mapred.TextInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                  properties:
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
+                    bucket_count 2
+                    bucket_field_name key
+                    bucketing_version 2
+                    column.name.delimiter ,
+                    columns key,value
+                    columns.comments 
+                    columns.types int:string
+#### A masked pattern was here ####
+                    name default.empty_bucket
+                    numFiles 0
+                    numRows 0
+                    rawDataSize 0
+                    serialization.ddl struct empty_bucket { i32 key, string value}
+                    serialization.format 1
+                    serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                    totalSize 0
+#### A masked pattern was here ####
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                
+                    input format: org.apache.hadoop.mapred.TextInputFormat
+                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                    properties:
+                      COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
+                      bucket_count 2
+                      bucket_field_name key
+                      bucketing_version 2
+                      column.name.delimiter ,
+                      columns key,value
+                      columns.comments 
+                      columns.types int:string
+#### A masked pattern was here ####
+                      name default.empty_bucket
+                      numFiles 0
+                      numRows 0
+                      rawDataSize 0
+                      serialization.ddl struct empty_bucket { i32 key, string value}
+                      serialization.format 1
+                      serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                      totalSize 0
+#### A masked pattern was here ####
+                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                    name: default.empty_bucket
+                  name: default.empty_bucket
+            Truncated Path -> Alias:
+              /empty_bucket [s]
         Reducer 2 
             Execution mode: vectorized
             Needs Tagging: false

http://git-wip-us.apache.org/repos/asf/hive/blob/eaf416ea/ql/src/test/results/clientpositive/spark/sample7.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/sample7.q.out b/ql/src/test/results/clientpositive/spark/sample7.q.out
index d0b52bc..d028e68 100644
--- a/ql/src/test/results/clientpositive/spark/sample7.q.out
+++ b/ql/src/test/results/clientpositive/spark/sample7.q.out
@@ -80,7 +80,7 @@ STAGE PLANS:
             Path -> Partition:
 #### A masked pattern was here ####
                 Partition
-                  base file name: 000000_0
+                  base file name: srcbucket
                   input format: org.apache.hadoop.mapred.TextInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                   properties:
@@ -129,7 +129,7 @@ STAGE PLANS:
                     name: default.srcbucket
                   name: default.srcbucket
             Truncated Path -> Alias:
-              /srcbucket/000000_0 [s]
+              /srcbucket [s]
 
   Stage: Stage-0
     Move Operator
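
The sample6/sample7 plan updates all follow one pattern: after HIVE-19481,
sampled scans of bucketed tables no longer bake individual bucket file names
(000000_0, 000001_0, ...) into the plan's path entries; the whole table
directory is listed instead, which suggests bucket selection now happens
when splits are generated rather than by compile-time file-name pruning.
That also explains the new empty_bucket Partition block: a bucket with
numFiles 0 still appears in the plan, since the plan no longer depends on
which files happen to exist on disk.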


[09/13] hive git commit: HIVE-19416 : merge master into branch (Sergey Shelukhin)

Posted by se...@apache.org.
HIVE-19416 : merge master into branch (Sergey Shelukhin)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/3c218612
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/3c218612
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/3c218612

Branch: refs/heads/master-txnstats
Commit: 3c21861272dfa0aec3af366a5890f7965b94e469
Parents: 61c55a3 966b83e
Author: sergey <se...@apache.org>
Authored: Tue Jun 26 14:20:20 2018 -0700
Committer: sergey <se...@apache.org>
Committed: Tue Jun 26 14:20:20 2018 -0700

----------------------------------------------------------------------
 .../hive/jdbc/TestJdbcGenericUDTFGetSplits.java |   3 +
 .../test/resources/testconfiguration.properties |   1 +
 .../hive/jdbc/TestHivePreparedStatement.java    | 277 +++---
 .../hive/ql/exec/tez/HiveSplitGenerator.java    |  40 +-
 .../hive/ql/exec/vector/VectorMapOperator.java  |   5 +-
 .../ql/exec/vector/VectorizedRowBatchCtx.java   |  15 -
 .../io/orc/VectorizedOrcAcidRowBatchReader.java |   4 +-
 .../hadoop/hive/ql/metadata/Partition.java      |  13 +-
 .../hadoop/hive/ql/optimizer/SamplePruner.java  |   7 +-
 .../hadoop/hive/ql/session/SessionState.java    |   4 +-
 .../test/queries/clientpositive/sample10_mm.q   |  34 +
 .../archive_excludeHadoop20.q.out               | 135 +++
 .../clientpositive/beeline/smb_mapjoin_11.q.out |   8 +-
 .../results/clientpositive/llap/sample10.q.out  |  20 +-
 .../clientpositive/llap/sample10_mm.q.out       | 346 ++++++++
 .../test/results/clientpositive/masking_5.q.out | 124 +++
 .../test/results/clientpositive/sample6.q.out   | 846 +++++++++++++++++--
 .../test/results/clientpositive/sample7.q.out   | 114 ++-
 .../test/results/clientpositive/sample9.q.out   | 258 +++++-
 .../results/clientpositive/smb_mapjoin_11.q.out |   8 +-
 .../infer_bucket_sort_bucketed_table.q.out      |   2 +-
 .../results/clientpositive/spark/sample10.q.out |  16 +-
 .../results/clientpositive/spark/sample2.q.out  |   4 +-
 .../results/clientpositive/spark/sample4.q.out  |   4 +-
 .../results/clientpositive/spark/sample6.q.out  | 126 +--
 .../results/clientpositive/spark/sample7.q.out  |   4 +-
 .../ptest2/conf/deployed/master-mr2.properties  |   7 +
 .../hive/ptest/api/client/PTestClient.java      |   2 -
 28 files changed, 2116 insertions(+), 311 deletions(-)
----------------------------------------------------------------------



[13/13] hive git commit: HIVE-19532 : fix tests - update some out files on master-txnstats branch (Sergey Shelukhin)

Posted by se...@apache.org.
HIVE-19532 : fix tests - update some out files on master-txnstats branch (Sergey Shelukhin)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/798ff7d2
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/798ff7d2
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/798ff7d2

Branch: refs/heads/master-txnstats
Commit: 798ff7d2443f4477c9fb02ad871511b152217829
Parents: bdc256e
Author: sergey <se...@apache.org>
Authored: Tue Jun 26 19:04:49 2018 -0700
Committer: sergey <se...@apache.org>
Committed: Tue Jun 26 19:04:49 2018 -0700

----------------------------------------------------------------------
 .../org/apache/hadoop/hive/ql/io/AcidUtils.java |  14 +-
 .../apache/hadoop/hive/ql/metadata/Hive.java    |   6 +-
 .../hive/ql/optimizer/StatsOptimizer.java       |   2 +-
 .../clientpositive/autoColumnStats_4.q.out      |   7 +-
 .../llap/acid_bucket_pruning.q.out              |  14 +-
 .../llap/acid_vectorization_original.q.out      |  14 +-
 .../llap/dynpart_sort_optimization_acid.q.out   | 114 +++++------
 .../llap/enforce_constraint_notnull.q.out       |  24 +--
 .../llap/insert_into_default_keyword.q.out      | 122 ++++++------
 .../insert_values_orig_table_use_metadata.q.out | 196 ++-----------------
 .../materialized_view_create_rewrite_3.q.out    |  40 ++--
 .../materialized_view_create_rewrite_4.q.out    |  33 ++--
 .../materialized_view_create_rewrite_5.q.out    |  51 ++---
 ...ized_view_create_rewrite_rebuild_dummy.q.out |  40 ++--
 .../results/clientpositive/llap/mm_all.q.out    |   4 +-
 .../results/clientpositive/llap/mm_exim.q.out   |   1 +
 .../llap/results_cache_invalidation.q.out       | 130 ++++++------
 .../llap/results_cache_transactional.q.out      |  74 +++----
 ql/src/test/results/clientpositive/mm_all.q.out |   4 +-
 .../results/clientpositive/mm_default.q.out     |   2 +-
 .../test/results/clientpositive/row__id.q.out   |  18 +-
 .../results/clientpositive/stats_nonpart.q.out  |   3 +-
 .../results/clientpositive/stats_part.q.out     |   9 +-
 .../results/clientpositive/stats_part2.q.out    |  46 +++--
 .../results/clientpositive/stats_sizebug.q.out  |   5 +-
 .../tez/acid_vectorization_original_tez.q.out   |  14 +-
 .../clientpositive/tez/explainanalyze_5.q.out   |  10 +-
 27 files changed, 438 insertions(+), 559 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/798ff7d2/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java
index e67b579..caf886f 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java
@@ -1652,6 +1652,11 @@ public class AcidUtils {
     }
   }
 
+  // TODO# remove
+  public static TableSnapshot getTableSnapshot(
+      Configuration conf, Table tbl) throws LockException {
+    return getTableSnapshot(conf, tbl, false);
+  }
   /**
    * Create a TableSnapshot with the given "conf"
    * for the table of the given "tbl".
@@ -1662,8 +1667,7 @@ public class AcidUtils {
    * @throws LockException
    */
   public static TableSnapshot getTableSnapshot(
-      Configuration conf,
-      Table tbl) throws LockException {
+      Configuration conf, Table tbl, boolean isInTxnScope) throws LockException {
     if (!isTransactionalTable(tbl)) {
       return null;
     } else {
@@ -1679,9 +1683,9 @@ public class AcidUtils {
       if (txnId > 0 && isTransactionalTable(tbl)) {
         validWriteIdList = getTableValidWriteIdList(conf, fullTableName);
 
-        // TODO: we shouldn't do this during normal Hive compilation, write IDs should be in conf.
-        //       Can this still happen for DDLTask-based queries?
-        if (validWriteIdList == null && !HiveConf.getBoolVar(conf, ConfVars.HIVE_IN_TEST)) {
+        // TODO: remove in_test filters?
+        if (validWriteIdList == null && !isInTxnScope
+            && !HiveConf.getBoolVar(conf, ConfVars.HIVE_IN_TEST)) {
           LOG.warn("Obtaining write IDs from metastore for " + tbl.getTableName());
           validWriteIdList = getTableValidWriteIdListWithTxnList(
               conf, tbl.getDbName(), tbl.getTableName());

http://git-wip-us.apache.org/repos/asf/hive/blob/798ff7d2/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
index 9f052ae..c0a9be0 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
@@ -18,6 +18,7 @@
 
 package org.apache.hadoop.hive.ql.metadata;
 
+import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Splitter;
 import com.google.common.collect.ImmutableMap;
 import com.google.common.collect.Lists;
@@ -768,7 +769,7 @@ public class Hive {
     try {
       AcidUtils.TableSnapshot tableSnapshot = null;
       if (transactional) {
-        tableSnapshot = AcidUtils.getTableSnapshot(conf, newParts.get(0).getTable());
+        tableSnapshot = AcidUtils.getTableSnapshot(conf, newParts.get(0).getTable(), true);
       }
       // Remove the DDL time so that it gets refreshed
       for (Partition tmpPart: newParts) {
@@ -2448,12 +2449,13 @@ private void constructOneLBLocationMap(FileStatus fSta,
    * @throws HiveException
    *           if table doesn't exist or partition already exists
    */
+  @VisibleForTesting
   public Partition createPartition(Table tbl, Map<String, String> partSpec) throws HiveException {
     try {
       org.apache.hadoop.hive.metastore.api.Partition part =
           Partition.createMetaPartitionObject(tbl, partSpec, null);
       AcidUtils.TableSnapshot tableSnapshot =
-          AcidUtils.getTableSnapshot(conf, tbl);
+          AcidUtils.getTableSnapshot(conf, tbl, false);
       part.setTxnId(tableSnapshot != null ? tableSnapshot.getTxnId() : 0);
       part.setValidWriteIdList(tableSnapshot != null ? tableSnapshot.getValidWriteIdList() : null);
       return new Partition(tbl, getMSC().add_partition(part));

http://git-wip-us.apache.org/repos/asf/hive/blob/798ff7d2/ql/src/java/org/apache/hadoop/hive/ql/optimizer/StatsOptimizer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/StatsOptimizer.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/StatsOptimizer.java
index 18a27c4..31041af 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/StatsOptimizer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/StatsOptimizer.java
@@ -919,7 +919,7 @@ public class StatsOptimizer extends Transform {
         partNames.add(part.getName());
       }
       AcidUtils.TableSnapshot tableSnapshot =
-          AcidUtils.getTableSnapshot(hive.getConf(), tbl);
+          AcidUtils.getTableSnapshot(hive.getConf(), tbl, true);
 
       Map<String, List<ColumnStatisticsObj>> result = hive.getMSC().getPartitionColumnStatistics(
           tbl.getDbName(), tbl.getTableName(), partNames, Lists.newArrayList(colName),
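
The new boolean threads an "already in a transaction" hint through
getTableSnapshot. The call sites above show the convention: what appears to
be alterPartitions in Hive.java and the StatsOptimizer path pass true,
createPartition passes false, and the temporary two-argument overload
defaults to false. Per the revised check in AcidUtils, only out-of-txn
callers fall back to fetching the valid write-ID list from the metastore
(with the warning); in-txn callers tolerate a missing list. Condensed from
the hunk above:

  if (validWriteIdList == null && !isInTxnScope
      && !HiveConf.getBoolVar(conf, ConfVars.HIVE_IN_TEST)) {
    // only callers outside an open transaction do the metastore round trip
    validWriteIdList = getTableValidWriteIdListWithTxnList(
        conf, tbl.getDbName(), tbl.getTableName());
  }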

http://git-wip-us.apache.org/repos/asf/hive/blob/798ff7d2/ql/src/test/results/clientpositive/autoColumnStats_4.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/autoColumnStats_4.q.out b/ql/src/test/results/clientpositive/autoColumnStats_4.q.out
index a16ec07..1906865 100644
--- a/ql/src/test/results/clientpositive/autoColumnStats_4.q.out
+++ b/ql/src/test/results/clientpositive/autoColumnStats_4.q.out
@@ -199,8 +199,11 @@ Retention:          	0
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
 Table Parameters:	 	 
+	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"a\":\"true\",\"b\":\"true\"}}
 	bucketing_version   	2                   
 	numFiles            	2                   
+	numRows             	10                  
+	rawDataSize         	0                   
 	totalSize           	1899                
 	transactional       	true                
 	transactional_properties	default             
@@ -241,9 +244,11 @@ Retention:          	0
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
 Table Parameters:	 	 
-	COLUMN_STATS_ACCURATE	{}                  
+	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\"}
 	bucketing_version   	2                   
 	numFiles            	4                   
+	numRows             	8                   
+	rawDataSize         	0                   
 	totalSize           	3275                
 	transactional       	true                
 	transactional_properties	default             

http://git-wip-us.apache.org/repos/asf/hive/blob/798ff7d2/ql/src/test/results/clientpositive/llap/acid_bucket_pruning.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/acid_bucket_pruning.q.out b/ql/src/test/results/clientpositive/llap/acid_bucket_pruning.q.out
index b856b99..cfb9f1b 100644
--- a/ql/src/test/results/clientpositive/llap/acid_bucket_pruning.q.out
+++ b/ql/src/test/results/clientpositive/llap/acid_bucket_pruning.q.out
@@ -45,22 +45,22 @@ STAGE PLANS:
                   alias: acidtbldefault
                   filterExpr: (a = 1) (type: boolean)
                   buckets included: [13,] of 16
-                  Statistics: Num rows: 1850 Data size: 7036 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 9174 Data size: 36696 Basic stats: COMPLETE Column stats: COMPLETE
                   GatherStats: false
                   Filter Operator
                     isSamplingPred: false
                     predicate: (a = 1) (type: boolean)
-                    Statistics: Num rows: 5 Data size: 19 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
                     Select Operator
                       expressions: 1 (type: int)
                       outputColumnNames: _col0
-                      Statistics: Num rows: 5 Data size: 19 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
                       File Output Operator
                         compressed: false
                         GlobalTableId: 0
                         directory: hdfs://### HDFS PATH ###
                         NumFilesPerFileSink: 1
-                        Statistics: Num rows: 5 Data size: 19 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
                         Stats Publishing Key Prefix: hdfs://### HDFS PATH ###
                         table:
                             input format: org.apache.hadoop.mapred.SequenceFileInputFormat
@@ -88,6 +88,7 @@ STAGE PLANS:
                   input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
                   output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
                   properties:
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"a":"true"}}
                     bucket_count 16
                     bucket_field_name a
                     bucketing_version 2
@@ -99,6 +100,8 @@ STAGE PLANS:
                     location hdfs://### HDFS PATH ###
                     name default.acidtbldefault
                     numFiles 17
+                    numRows 9174
+                    rawDataSize 0
                     serialization.ddl struct acidtbldefault { i32 a}
                     serialization.format 1
                     serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde
@@ -111,6 +114,7 @@ STAGE PLANS:
                     input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
                     output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
                     properties:
+                      COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"a":"true"}}
                       bucket_count 16
                       bucket_field_name a
                       bucketing_version 2
@@ -122,6 +126,8 @@ STAGE PLANS:
                       location hdfs://### HDFS PATH ###
                       name default.acidtbldefault
                       numFiles 17
+                      numRows 9174
+                      rawDataSize 0
                       serialization.ddl struct acidtbldefault { i32 a}
                       serialization.format 1
                       serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde

http://git-wip-us.apache.org/repos/asf/hive/blob/798ff7d2/ql/src/test/results/clientpositive/llap/acid_vectorization_original.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/acid_vectorization_original.q.out b/ql/src/test/results/clientpositive/llap/acid_vectorization_original.q.out
index be1b4c6..57ff575 100644
--- a/ql/src/test/results/clientpositive/llap/acid_vectorization_original.q.out
+++ b/ql/src/test/results/clientpositive/llap/acid_vectorization_original.q.out
@@ -665,22 +665,22 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: over10k_orc_bucketed
-                  Statistics: Num rows: 1237 Data size: 707670 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 2098 Data size: 622340 Basic stats: COMPLETE Column stats: COMPLETE
                   Select Operator
                     expressions: ROW__ID (type: struct<writeid:bigint,bucketid:int,rowid:bigint>)
                     outputColumnNames: ROW__ID
-                    Statistics: Num rows: 1237 Data size: 707670 Basic stats: COMPLETE Column stats: COMPLETE
+                    Statistics: Num rows: 2098 Data size: 622340 Basic stats: COMPLETE Column stats: COMPLETE
                     Group By Operator
                       aggregations: count()
                       keys: ROW__ID (type: struct<writeid:bigint,bucketid:int,rowid:bigint>)
                       mode: hash
                       outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 618 Data size: 51912 Basic stats: COMPLETE Column stats: COMPLETE
+                      Statistics: Num rows: 1049 Data size: 88116 Basic stats: COMPLETE Column stats: COMPLETE
                       Reduce Output Operator
                         key expressions: _col0 (type: struct<writeid:bigint,bucketid:int,rowid:bigint>)
                         sort order: +
                         Map-reduce partition columns: _col0 (type: struct<writeid:bigint,bucketid:int,rowid:bigint>)
-                        Statistics: Num rows: 618 Data size: 51912 Basic stats: COMPLETE Column stats: COMPLETE
+                        Statistics: Num rows: 1049 Data size: 88116 Basic stats: COMPLETE Column stats: COMPLETE
                         value expressions: _col1 (type: bigint)
             Execution mode: llap
             LLAP IO: may be used (ACID table)
@@ -692,13 +692,13 @@ STAGE PLANS:
                 keys: KEY._col0 (type: struct<writeid:bigint,bucketid:int,rowid:bigint>)
                 mode: mergepartial
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 618 Data size: 51912 Basic stats: COMPLETE Column stats: COMPLETE
+                Statistics: Num rows: 1049 Data size: 88116 Basic stats: COMPLETE Column stats: COMPLETE
                 Filter Operator
                   predicate: (_col1 > 1L) (type: boolean)
-                  Statistics: Num rows: 206 Data size: 17304 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 349 Data size: 29316 Basic stats: COMPLETE Column stats: COMPLETE
                   File Output Operator
                     compressed: false
-                    Statistics: Num rows: 206 Data size: 17304 Basic stats: COMPLETE Column stats: COMPLETE
+                    Statistics: Num rows: 349 Data size: 29316 Basic stats: COMPLETE Column stats: COMPLETE
                     table:
                         input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                         output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat

http://git-wip-us.apache.org/repos/asf/hive/blob/798ff7d2/ql/src/test/results/clientpositive/llap/dynpart_sort_optimization_acid.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/dynpart_sort_optimization_acid.q.out b/ql/src/test/results/clientpositive/llap/dynpart_sort_optimization_acid.q.out
index 7a9e200..6c3751d 100644
--- a/ql/src/test/results/clientpositive/llap/dynpart_sort_optimization_acid.q.out
+++ b/ql/src/test/results/clientpositive/llap/dynpart_sort_optimization_acid.q.out
@@ -95,19 +95,19 @@ STAGE PLANS:
                 TableScan
                   alias: acid_part
                   filterExpr: ((key = 'foo') and (ds = '2008-04-08')) (type: boolean)
-                  Statistics: Num rows: 160 Data size: 61001 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 1601 Data size: 139287 Basic stats: COMPLETE Column stats: PARTIAL
                   Filter Operator
                     predicate: (key = 'foo') (type: boolean)
-                    Statistics: Num rows: 5 Data size: 1906 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 5 Data size: 435 Basic stats: COMPLETE Column stats: PARTIAL
                     Select Operator
                       expressions: ROW__ID (type: struct<writeid:bigint,bucketid:int,rowid:bigint>)
                       outputColumnNames: _col0
-                      Statistics: Num rows: 5 Data size: 1906 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 5 Data size: 1720 Basic stats: COMPLETE Column stats: PARTIAL
                       Reduce Output Operator
                         key expressions: _col0 (type: struct<writeid:bigint,bucketid:int,rowid:bigint>)
                         sort order: +
                         Map-reduce partition columns: UDFToInteger(_col0) (type: int)
-                        Statistics: Num rows: 5 Data size: 1906 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 5 Data size: 1720 Basic stats: COMPLETE Column stats: PARTIAL
             Execution mode: llap
             LLAP IO: may be used (ACID table)
         Reducer 2 
@@ -116,10 +116,10 @@ STAGE PLANS:
               Select Operator
                 expressions: KEY.reducesinkkey0 (type: struct<writeid:bigint,bucketid:int,rowid:bigint>), 'foo' (type: string), 'bar' (type: string), '2008-04-08' (type: string)
                 outputColumnNames: _col0, _col1, _col2, _col3
-                Statistics: Num rows: 5 Data size: 1906 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 5 Data size: 1720 Basic stats: COMPLETE Column stats: PARTIAL
                 File Output Operator
                   compressed: false
-                  Statistics: Num rows: 5 Data size: 1906 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 5 Data size: 1720 Basic stats: COMPLETE Column stats: PARTIAL
                   table:
                       input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
                       output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
@@ -191,7 +191,7 @@ STAGE PLANS:
                 TableScan
                   alias: acid_part
                   filterExpr: ((key = 'foo') and (ds) IN ('2008-04-08')) (type: boolean)
-                  Statistics: Num rows: 159 Data size: 104317 Basic stats: COMPLETE Column stats: PARTIAL
+                  Statistics: Num rows: 1601 Data size: 433871 Basic stats: COMPLETE Column stats: PARTIAL
                   Filter Operator
                     predicate: (key = 'foo') (type: boolean)
                     Statistics: Num rows: 5 Data size: 1355 Basic stats: COMPLETE Column stats: PARTIAL
@@ -383,19 +383,19 @@ STAGE PLANS:
                 TableScan
                   alias: acid_part_sdpo
                   filterExpr: ((key = 'foo') and (ds = '2008-04-08')) (type: boolean)
-                  Statistics: Num rows: 176 Data size: 67063 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 1601 Data size: 150414 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
                     predicate: (key = 'foo') (type: boolean)
-                    Statistics: Num rows: 5 Data size: 1905 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 5 Data size: 469 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: ROW__ID (type: struct<writeid:bigint,bucketid:int,rowid:bigint>)
                       outputColumnNames: _col0
-                      Statistics: Num rows: 5 Data size: 1905 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 5 Data size: 469 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col0 (type: struct<writeid:bigint,bucketid:int,rowid:bigint>)
                         sort order: +
                         Map-reduce partition columns: UDFToInteger(_col0) (type: int)
-                        Statistics: Num rows: 5 Data size: 1905 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 5 Data size: 469 Basic stats: COMPLETE Column stats: NONE
             Execution mode: llap
             LLAP IO: may be used (ACID table)
         Reducer 2 
@@ -404,10 +404,10 @@ STAGE PLANS:
               Select Operator
                 expressions: KEY.reducesinkkey0 (type: struct<writeid:bigint,bucketid:int,rowid:bigint>), 'foo' (type: string), 'bar' (type: string), '2008-04-08' (type: string)
                 outputColumnNames: _col0, _col1, _col2, _col3
-                Statistics: Num rows: 5 Data size: 1905 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 5 Data size: 469 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
-                  Statistics: Num rows: 5 Data size: 1905 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 5 Data size: 469 Basic stats: COMPLETE Column stats: NONE
                   table:
                       input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
                       output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
@@ -479,7 +479,7 @@ STAGE PLANS:
                 TableScan
                   alias: acid_part_sdpo
                   filterExpr: ((key = 'foo') and (ds) IN ('2008-04-08')) (type: boolean)
-                  Statistics: Num rows: 171 Data size: 112152 Basic stats: COMPLETE Column stats: PARTIAL
+                  Statistics: Num rows: 1601 Data size: 444998 Basic stats: COMPLETE Column stats: PARTIAL
                   Filter Operator
                     predicate: (key = 'foo') (type: boolean)
                     Statistics: Num rows: 5 Data size: 1355 Basic stats: COMPLETE Column stats: PARTIAL
@@ -680,19 +680,19 @@ STAGE PLANS:
                 TableScan
                   alias: acid_2l_part
                   filterExpr: ((key = 'foo') and (ds = '2008-04-08') and (hr = 11)) (type: boolean)
-                  Statistics: Num rows: 157 Data size: 60527 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 1601 Data size: 139287 Basic stats: COMPLETE Column stats: PARTIAL
                   Filter Operator
                     predicate: (key = 'foo') (type: boolean)
-                    Statistics: Num rows: 5 Data size: 1927 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 5 Data size: 435 Basic stats: COMPLETE Column stats: PARTIAL
                     Select Operator
                       expressions: ROW__ID (type: struct<writeid:bigint,bucketid:int,rowid:bigint>)
                       outputColumnNames: _col0
-                      Statistics: Num rows: 5 Data size: 1927 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 5 Data size: 1740 Basic stats: COMPLETE Column stats: PARTIAL
                       Reduce Output Operator
                         key expressions: _col0 (type: struct<writeid:bigint,bucketid:int,rowid:bigint>)
                         sort order: +
                         Map-reduce partition columns: UDFToInteger(_col0) (type: int)
-                        Statistics: Num rows: 5 Data size: 1927 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 5 Data size: 1740 Basic stats: COMPLETE Column stats: PARTIAL
             Execution mode: llap
             LLAP IO: may be used (ACID table)
         Reducer 2 
@@ -701,10 +701,10 @@ STAGE PLANS:
               Select Operator
                 expressions: KEY.reducesinkkey0 (type: struct<writeid:bigint,bucketid:int,rowid:bigint>), 'foo' (type: string), 'bar' (type: string), '2008-04-08' (type: string), 11 (type: int)
                 outputColumnNames: _col0, _col1, _col2, _col3, _col4
-                Statistics: Num rows: 5 Data size: 1927 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 5 Data size: 1740 Basic stats: COMPLETE Column stats: PARTIAL
                 File Output Operator
                   compressed: false
-                  Statistics: Num rows: 5 Data size: 1927 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 5 Data size: 1740 Basic stats: COMPLETE Column stats: PARTIAL
                   table:
                       input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
                       output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
@@ -777,19 +777,19 @@ STAGE PLANS:
                 TableScan
                   alias: acid_2l_part
                   filterExpr: ((key = 'foo') and (ds = '2008-04-08') and (hr >= 11)) (type: boolean)
-                  Statistics: Num rows: 1600 Data size: 156727 Basic stats: PARTIAL Column stats: PARTIAL
+                  Statistics: Num rows: 3201 Data size: 291291 Basic stats: COMPLETE Column stats: PARTIAL
                   Filter Operator
                     predicate: (key = 'foo') (type: boolean)
-                    Statistics: Num rows: 5 Data size: 489 Basic stats: PARTIAL Column stats: PARTIAL
+                    Statistics: Num rows: 10 Data size: 910 Basic stats: COMPLETE Column stats: PARTIAL
                     Select Operator
                       expressions: ROW__ID (type: struct<writeid:bigint,bucketid:int,rowid:bigint>), hr (type: int)
                       outputColumnNames: _col0, _col4
-                      Statistics: Num rows: 5 Data size: 489 Basic stats: PARTIAL Column stats: PARTIAL
+                      Statistics: Num rows: 10 Data size: 3480 Basic stats: COMPLETE Column stats: PARTIAL
                       Reduce Output Operator
                         key expressions: _col0 (type: struct<writeid:bigint,bucketid:int,rowid:bigint>)
                         sort order: +
                         Map-reduce partition columns: UDFToInteger(_col0) (type: int)
-                        Statistics: Num rows: 5 Data size: 489 Basic stats: PARTIAL Column stats: PARTIAL
+                        Statistics: Num rows: 10 Data size: 3480 Basic stats: COMPLETE Column stats: PARTIAL
                         value expressions: _col4 (type: int)
             Execution mode: llap
             LLAP IO: may be used (ACID table)
@@ -799,10 +799,10 @@ STAGE PLANS:
               Select Operator
                 expressions: KEY.reducesinkkey0 (type: struct<writeid:bigint,bucketid:int,rowid:bigint>), 'foo' (type: string), 'bar' (type: string), '2008-04-08' (type: string), VALUE._col2 (type: int)
                 outputColumnNames: _col0, _col1, _col2, _col3, _col4
-                Statistics: Num rows: 5 Data size: 489 Basic stats: PARTIAL Column stats: PARTIAL
+                Statistics: Num rows: 10 Data size: 3480 Basic stats: COMPLETE Column stats: PARTIAL
                 File Output Operator
                   compressed: false
-                  Statistics: Num rows: 5 Data size: 489 Basic stats: PARTIAL Column stats: PARTIAL
+                  Statistics: Num rows: 10 Data size: 3480 Basic stats: COMPLETE Column stats: PARTIAL
                   table:
                       input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
                       output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
@@ -904,19 +904,19 @@ STAGE PLANS:
                 TableScan
                   alias: acid_2l_part
                   filterExpr: (value = 'bar') (type: boolean)
-                  Statistics: Num rows: 1600 Data size: 451127 Basic stats: PARTIAL Column stats: PARTIAL
+                  Statistics: Num rows: 4200 Data size: 1171800 Basic stats: COMPLETE Column stats: PARTIAL
                   Filter Operator
                     predicate: (value = 'bar') (type: boolean)
-                    Statistics: Num rows: 5 Data size: 1409 Basic stats: PARTIAL Column stats: PARTIAL
+                    Statistics: Num rows: 14 Data size: 3906 Basic stats: COMPLETE Column stats: PARTIAL
                     Select Operator
                       expressions: ROW__ID (type: struct<writeid:bigint,bucketid:int,rowid:bigint>), ds (type: string), hr (type: int)
                       outputColumnNames: _col0, _col1, _col2
-                      Statistics: Num rows: 5 Data size: 1409 Basic stats: PARTIAL Column stats: PARTIAL
+                      Statistics: Num rows: 14 Data size: 3696 Basic stats: COMPLETE Column stats: PARTIAL
                       Reduce Output Operator
                         key expressions: _col0 (type: struct<writeid:bigint,bucketid:int,rowid:bigint>)
                         sort order: +
                         Map-reduce partition columns: UDFToInteger(_col0) (type: int)
-                        Statistics: Num rows: 5 Data size: 1409 Basic stats: PARTIAL Column stats: PARTIAL
+                        Statistics: Num rows: 14 Data size: 3696 Basic stats: COMPLETE Column stats: PARTIAL
                         value expressions: _col1 (type: string), _col2 (type: int)
             Execution mode: llap
             LLAP IO: may be used (ACID table)
@@ -926,10 +926,10 @@ STAGE PLANS:
               Select Operator
                 expressions: KEY.reducesinkkey0 (type: struct<writeid:bigint,bucketid:int,rowid:bigint>), VALUE._col0 (type: string), VALUE._col1 (type: int)
                 outputColumnNames: _col0, _col1, _col2
-                Statistics: Num rows: 5 Data size: 1409 Basic stats: PARTIAL Column stats: PARTIAL
+                Statistics: Num rows: 14 Data size: 3696 Basic stats: COMPLETE Column stats: PARTIAL
                 File Output Operator
                   compressed: false
-                  Statistics: Num rows: 5 Data size: 1409 Basic stats: PARTIAL Column stats: PARTIAL
+                  Statistics: Num rows: 14 Data size: 3696 Basic stats: COMPLETE Column stats: PARTIAL
                   table:
                       input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
                       output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
@@ -1103,19 +1103,19 @@ STAGE PLANS:
                 TableScan
                   alias: acid_2l_part_sdpo
                   filterExpr: ((key = 'foo') and (ds = '2008-04-08') and (hr = 11)) (type: boolean)
-                  Statistics: Num rows: 157 Data size: 60527 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 1601 Data size: 150414 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
                     predicate: (key = 'foo') (type: boolean)
-                    Statistics: Num rows: 5 Data size: 1927 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 5 Data size: 469 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: ROW__ID (type: struct<writeid:bigint,bucketid:int,rowid:bigint>)
                       outputColumnNames: _col0
-                      Statistics: Num rows: 5 Data size: 1927 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 5 Data size: 469 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col0 (type: struct<writeid:bigint,bucketid:int,rowid:bigint>)
                         sort order: +
                         Map-reduce partition columns: UDFToInteger(_col0) (type: int)
-                        Statistics: Num rows: 5 Data size: 1927 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 5 Data size: 469 Basic stats: COMPLETE Column stats: NONE
             Execution mode: llap
             LLAP IO: may be used (ACID table)
         Reducer 2 
@@ -1124,10 +1124,10 @@ STAGE PLANS:
               Select Operator
                 expressions: KEY.reducesinkkey0 (type: struct<writeid:bigint,bucketid:int,rowid:bigint>), 'foo' (type: string), 'bar' (type: string), '2008-04-08' (type: string), 11 (type: int)
                 outputColumnNames: _col0, _col1, _col2, _col3, _col4
-                Statistics: Num rows: 5 Data size: 1927 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 5 Data size: 469 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
-                  Statistics: Num rows: 5 Data size: 1927 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 5 Data size: 469 Basic stats: COMPLETE Column stats: NONE
                   table:
                       input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
                       output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
@@ -1200,19 +1200,19 @@ STAGE PLANS:
                 TableScan
                   alias: acid_2l_part_sdpo
                   filterExpr: ((key = 'foo') and (ds = '2008-04-08') and (hr >= 11)) (type: boolean)
-                  Statistics: Num rows: 1600 Data size: 156727 Basic stats: PARTIAL Column stats: PARTIAL
+                  Statistics: Num rows: 3201 Data size: 313458 Basic stats: COMPLETE Column stats: PARTIAL
                   Filter Operator
                     predicate: (key = 'foo') (type: boolean)
-                    Statistics: Num rows: 5 Data size: 489 Basic stats: PARTIAL Column stats: PARTIAL
+                    Statistics: Num rows: 5 Data size: 455 Basic stats: COMPLETE Column stats: PARTIAL
                     Select Operator
                       expressions: ROW__ID (type: struct<writeid:bigint,bucketid:int,rowid:bigint>), hr (type: int)
                       outputColumnNames: _col0, _col4
-                      Statistics: Num rows: 5 Data size: 489 Basic stats: PARTIAL Column stats: PARTIAL
+                      Statistics: Num rows: 5 Data size: 1740 Basic stats: COMPLETE Column stats: PARTIAL
                       Reduce Output Operator
                         key expressions: '2008-04-08' (type: string), _col4 (type: int), '_bucket_number' (type: string), _col0 (type: struct<writeid:bigint,bucketid:int,rowid:bigint>)
                         sort order: ++++
                         Map-reduce partition columns: '2008-04-08' (type: string), _col4 (type: int)
-                        Statistics: Num rows: 5 Data size: 489 Basic stats: PARTIAL Column stats: PARTIAL
+                        Statistics: Num rows: 5 Data size: 1740 Basic stats: COMPLETE Column stats: PARTIAL
             Execution mode: llap
             LLAP IO: may be used (ACID table)
         Reducer 2 
@@ -1221,11 +1221,11 @@ STAGE PLANS:
               Select Operator
                 expressions: KEY._col0 (type: struct<writeid:bigint,bucketid:int,rowid:bigint>), 'foo' (type: string), 'bar' (type: string), '2008-04-08' (type: string), KEY._col4 (type: int), KEY.'_bucket_number' (type: string)
                 outputColumnNames: _col0, _col1, _col2, _col3, _col4, '_bucket_number'
-                Statistics: Num rows: 5 Data size: 489 Basic stats: PARTIAL Column stats: PARTIAL
+                Statistics: Num rows: 5 Data size: 1360 Basic stats: COMPLETE Column stats: PARTIAL
                 File Output Operator
                   compressed: false
                   Dp Sort State: PARTITION_BUCKET_SORTED
-                  Statistics: Num rows: 5 Data size: 489 Basic stats: PARTIAL Column stats: PARTIAL
+                  Statistics: Num rows: 5 Data size: 1360 Basic stats: COMPLETE Column stats: PARTIAL
                   table:
                       input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
                       output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
@@ -1327,19 +1327,19 @@ STAGE PLANS:
                 TableScan
                   alias: acid_2l_part_sdpo
                   filterExpr: (value = 'bar') (type: boolean)
-                  Statistics: Num rows: 1600 Data size: 451127 Basic stats: PARTIAL Column stats: PARTIAL
+                  Statistics: Num rows: 4952 Data size: 2061430 Basic stats: COMPLETE Column stats: PARTIAL
                   Filter Operator
                     predicate: (value = 'bar') (type: boolean)
-                    Statistics: Num rows: 5 Data size: 1409 Basic stats: PARTIAL Column stats: PARTIAL
+                    Statistics: Num rows: 5 Data size: 1375 Basic stats: COMPLETE Column stats: PARTIAL
                     Select Operator
                       expressions: ROW__ID (type: struct<writeid:bigint,bucketid:int,rowid:bigint>), ds (type: string), hr (type: int)
                       outputColumnNames: _col0, _col1, _col2
-                      Statistics: Num rows: 5 Data size: 1409 Basic stats: PARTIAL Column stats: PARTIAL
+                      Statistics: Num rows: 5 Data size: 1320 Basic stats: COMPLETE Column stats: PARTIAL
                       Reduce Output Operator
                         key expressions: _col1 (type: string), _col2 (type: int), '_bucket_number' (type: string), _col0 (type: struct<writeid:bigint,bucketid:int,rowid:bigint>)
                         sort order: ++++
                         Map-reduce partition columns: _col1 (type: string), _col2 (type: int)
-                        Statistics: Num rows: 5 Data size: 1409 Basic stats: PARTIAL Column stats: PARTIAL
+                        Statistics: Num rows: 5 Data size: 1320 Basic stats: COMPLETE Column stats: PARTIAL
             Execution mode: llap
             LLAP IO: may be used (ACID table)
         Reducer 2 
@@ -1348,11 +1348,11 @@ STAGE PLANS:
               Select Operator
                 expressions: KEY._col0 (type: struct<writeid:bigint,bucketid:int,rowid:bigint>), KEY._col1 (type: string), KEY._col2 (type: int), KEY.'_bucket_number' (type: string)
                 outputColumnNames: _col0, _col1, _col2, '_bucket_number'
-                Statistics: Num rows: 5 Data size: 1409 Basic stats: PARTIAL Column stats: PARTIAL
+                Statistics: Num rows: 5 Data size: 1810 Basic stats: COMPLETE Column stats: PARTIAL
                 File Output Operator
                   compressed: false
                   Dp Sort State: PARTITION_BUCKET_SORTED
-                  Statistics: Num rows: 5 Data size: 1409 Basic stats: PARTIAL Column stats: PARTIAL
+                  Statistics: Num rows: 5 Data size: 1810 Basic stats: COMPLETE Column stats: PARTIAL
                   table:
                       input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
                       output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
@@ -1526,7 +1526,7 @@ STAGE PLANS:
                 TableScan
                   alias: acid_2l_part_sdpo_no_cp
                   filterExpr: ((key = 'foo') and (ds = '2008-04-08') and (hr = 11)) (type: boolean)
-                  Statistics: Num rows: 97 Data size: 82922 Basic stats: COMPLETE Column stats: PARTIAL
+                  Statistics: Num rows: 1601 Data size: 599036 Basic stats: COMPLETE Column stats: PARTIAL
                   Filter Operator
                     predicate: (key = 'foo') (type: boolean)
                     Statistics: Num rows: 5 Data size: 1860 Basic stats: COMPLETE Column stats: PARTIAL
@@ -1625,19 +1625,19 @@ STAGE PLANS:
                 TableScan
                   alias: acid_2l_part_sdpo_no_cp
                   filterExpr: ((key = 'foo') and (ds = '2008-04-08') and (hr >= 11)) (type: boolean)
-                  Statistics: Num rows: 1600 Data size: 598664 Basic stats: PARTIAL Column stats: PARTIAL
+                  Statistics: Num rows: 3201 Data size: 1197516 Basic stats: COMPLETE Column stats: PARTIAL
                   Filter Operator
                     predicate: (key = 'foo') (type: boolean)
-                    Statistics: Num rows: 5 Data size: 1870 Basic stats: PARTIAL Column stats: PARTIAL
+                    Statistics: Num rows: 5 Data size: 1860 Basic stats: COMPLETE Column stats: PARTIAL
                     Select Operator
                       expressions: ROW__ID (type: struct<writeid:bigint,bucketid:int,rowid:bigint>), key (type: string), ds (type: string), hr (type: int)
                       outputColumnNames: _col0, _col1, _col3, _col4
-                      Statistics: Num rows: 5 Data size: 1870 Basic stats: PARTIAL Column stats: PARTIAL
+                      Statistics: Num rows: 5 Data size: 2675 Basic stats: COMPLETE Column stats: PARTIAL
                       Reduce Output Operator
                         key expressions: _col3 (type: string), _col4 (type: int), '_bucket_number' (type: string), _col0 (type: struct<writeid:bigint,bucketid:int,rowid:bigint>)
                         sort order: ++++
                         Map-reduce partition columns: _col3 (type: string), _col4 (type: int)
-                        Statistics: Num rows: 5 Data size: 1870 Basic stats: PARTIAL Column stats: PARTIAL
+                        Statistics: Num rows: 5 Data size: 2675 Basic stats: COMPLETE Column stats: PARTIAL
                         value expressions: _col1 (type: string), 'bar' (type: string)
             Execution mode: llap
             LLAP IO: may be used (ACID table)
@@ -1647,11 +1647,11 @@ STAGE PLANS:
               Select Operator
                 expressions: KEY._col0 (type: struct<writeid:bigint,bucketid:int,rowid:bigint>), VALUE._col1 (type: string), VALUE._col2 (type: string), KEY._col3 (type: string), KEY._col4 (type: int), KEY.'_bucket_number' (type: string)
                 outputColumnNames: _col0, _col1, _col2, _col3, _col4, '_bucket_number'
-                Statistics: Num rows: 5 Data size: 1870 Basic stats: PARTIAL Column stats: PARTIAL
+                Statistics: Num rows: 5 Data size: 3165 Basic stats: COMPLETE Column stats: PARTIAL
                 File Output Operator
                   compressed: false
                   Dp Sort State: PARTITION_BUCKET_SORTED
-                  Statistics: Num rows: 5 Data size: 1870 Basic stats: PARTIAL Column stats: PARTIAL
+                  Statistics: Num rows: 5 Data size: 3165 Basic stats: COMPLETE Column stats: PARTIAL
                   table:
                       input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
                       output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat

http://git-wip-us.apache.org/repos/asf/hive/blob/798ff7d2/ql/src/test/results/clientpositive/llap/enforce_constraint_notnull.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/enforce_constraint_notnull.q.out b/ql/src/test/results/clientpositive/llap/enforce_constraint_notnull.q.out
index 8a5a326..ecf79ae 100644
--- a/ql/src/test/results/clientpositive/llap/enforce_constraint_notnull.q.out
+++ b/ql/src/test/results/clientpositive/llap/enforce_constraint_notnull.q.out
@@ -3237,19 +3237,19 @@ STAGE PLANS:
                 TableScan
                   alias: acid_uami_n1
                   filterExpr: (((de = 109.23) or (de = 119.23)) and enforce_constraint(vc is not null)) (type: boolean)
-                  Statistics: Num rows: 281 Data size: 87904 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 1002 Data size: 225450 Basic stats: COMPLETE Column stats: COMPLETE
                   Filter Operator
                     predicate: (((de = 109.23) or (de = 119.23)) and enforce_constraint(vc is not null)) (type: boolean)
-                    Statistics: Num rows: 5 Data size: 1564 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 3 Data size: 675 Basic stats: COMPLETE Column stats: COMPLETE
                     Select Operator
                       expressions: ROW__ID (type: struct<writeid:bigint,bucketid:int,rowid:bigint>), i (type: int), vc (type: varchar(128))
                       outputColumnNames: _col0, _col1, _col3
-                      Statistics: Num rows: 5 Data size: 1564 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 3 Data size: 903 Basic stats: COMPLETE Column stats: COMPLETE
                       Reduce Output Operator
                         key expressions: _col0 (type: struct<writeid:bigint,bucketid:int,rowid:bigint>)
                         sort order: +
                         Map-reduce partition columns: UDFToInteger(_col0) (type: int)
-                        Statistics: Num rows: 5 Data size: 1564 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 3 Data size: 903 Basic stats: COMPLETE Column stats: COMPLETE
                         value expressions: _col1 (type: int), _col3 (type: varchar(128))
             Execution mode: vectorized, llap
             LLAP IO: may be used (ACID table)
@@ -3259,10 +3259,10 @@ STAGE PLANS:
               Select Operator
                 expressions: KEY.reducesinkkey0 (type: struct<writeid:bigint,bucketid:int,rowid:bigint>), VALUE._col0 (type: int), 3.14 (type: decimal(5,2)), VALUE._col1 (type: varchar(128))
                 outputColumnNames: _col0, _col1, _col2, _col3
-                Statistics: Num rows: 5 Data size: 1564 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 3 Data size: 903 Basic stats: COMPLETE Column stats: COMPLETE
                 File Output Operator
                   compressed: false
-                  Statistics: Num rows: 5 Data size: 1564 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 3 Data size: 903 Basic stats: COMPLETE Column stats: COMPLETE
                   table:
                       input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
                       output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
@@ -3331,19 +3331,19 @@ STAGE PLANS:
                 TableScan
                   alias: acid_uami_n1
                   filterExpr: ((de = 3.14) and enforce_constraint((i is not null and vc is not null))) (type: boolean)
-                  Statistics: Num rows: 320 Data size: 100040 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 1002 Data size: 225450 Basic stats: COMPLETE Column stats: COMPLETE
                   Filter Operator
                     predicate: ((de = 3.14) and enforce_constraint((i is not null and vc is not null))) (type: boolean)
-                    Statistics: Num rows: 2 Data size: 625 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 1 Data size: 225 Basic stats: COMPLETE Column stats: COMPLETE
                     Select Operator
                       expressions: ROW__ID (type: struct<writeid:bigint,bucketid:int,rowid:bigint>), i (type: int), vc (type: varchar(128))
                       outputColumnNames: _col0, _col1, _col3
-                      Statistics: Num rows: 2 Data size: 625 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 1 Data size: 301 Basic stats: COMPLETE Column stats: COMPLETE
                       Reduce Output Operator
                         key expressions: _col0 (type: struct<writeid:bigint,bucketid:int,rowid:bigint>)
                         sort order: +
                         Map-reduce partition columns: UDFToInteger(_col0) (type: int)
-                        Statistics: Num rows: 2 Data size: 625 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 1 Data size: 301 Basic stats: COMPLETE Column stats: COMPLETE
                         value expressions: _col1 (type: int), _col3 (type: varchar(128))
             Execution mode: vectorized, llap
             LLAP IO: may be used (ACID table)
@@ -3353,10 +3353,10 @@ STAGE PLANS:
               Select Operator
                 expressions: KEY.reducesinkkey0 (type: struct<writeid:bigint,bucketid:int,rowid:bigint>), VALUE._col0 (type: int), 3.14 (type: decimal(5,2)), VALUE._col1 (type: varchar(128))
                 outputColumnNames: _col0, _col1, _col2, _col3
-                Statistics: Num rows: 2 Data size: 625 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 1 Data size: 301 Basic stats: COMPLETE Column stats: COMPLETE
                 File Output Operator
                   compressed: false
-                  Statistics: Num rows: 2 Data size: 625 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 1 Data size: 301 Basic stats: COMPLETE Column stats: COMPLETE
                   table:
                       input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
                       output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat

http://git-wip-us.apache.org/repos/asf/hive/blob/798ff7d2/ql/src/test/results/clientpositive/llap/insert_into_default_keyword.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/insert_into_default_keyword.q.out b/ql/src/test/results/clientpositive/llap/insert_into_default_keyword.q.out
index cd38c51..a93593f 100644
--- a/ql/src/test/results/clientpositive/llap/insert_into_default_keyword.q.out
+++ b/ql/src/test/results/clientpositive/llap/insert_into_default_keyword.q.out
@@ -1705,19 +1705,19 @@ STAGE PLANS:
                 TableScan
                   alias: insert_into1_n0
                   filterExpr: (value = 1) (type: boolean)
-                  Statistics: Num rows: 25 Data size: 4700 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 1 Data size: 89 Basic stats: COMPLETE Column stats: COMPLETE
                   Filter Operator
                     predicate: (value = 1) (type: boolean)
-                    Statistics: Num rows: 5 Data size: 940 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 1 Data size: 89 Basic stats: COMPLETE Column stats: COMPLETE
                     Select Operator
                       expressions: ROW__ID (type: struct<writeid:bigint,bucketid:int,rowid:bigint>), value (type: string), i (type: int)
                       outputColumnNames: _col0, _col2, _col3
-                      Statistics: Num rows: 5 Data size: 940 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 1 Data size: 169 Basic stats: COMPLETE Column stats: COMPLETE
                       Reduce Output Operator
                         key expressions: _col0 (type: struct<writeid:bigint,bucketid:int,rowid:bigint>)
                         sort order: +
                         Map-reduce partition columns: UDFToInteger(_col0) (type: int)
-                        Statistics: Num rows: 5 Data size: 940 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 1 Data size: 169 Basic stats: COMPLETE Column stats: COMPLETE
                         value expressions: _col2 (type: string), _col3 (type: int)
             Execution mode: vectorized, llap
             LLAP IO: may be used (ACID table)
@@ -1727,10 +1727,10 @@ STAGE PLANS:
               Select Operator
                 expressions: KEY.reducesinkkey0 (type: struct<writeid:bigint,bucketid:int,rowid:bigint>), 1 (type: int), VALUE._col0 (type: string), VALUE._col1 (type: int)
                 outputColumnNames: _col0, _col1, _col2, _col3
-                Statistics: Num rows: 5 Data size: 940 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 1 Data size: 169 Basic stats: COMPLETE Column stats: COMPLETE
                 File Output Operator
                   compressed: false
-                  Statistics: Num rows: 5 Data size: 940 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 1 Data size: 169 Basic stats: COMPLETE Column stats: COMPLETE
                   table:
                       input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
                       output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
@@ -1813,19 +1813,19 @@ STAGE PLANS:
                 TableScan
                   alias: insert_into1_n0
                   filterExpr: (value = 1) (type: boolean)
-                  Statistics: Num rows: 25 Data size: 4700 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 1 Data size: 89 Basic stats: COMPLETE Column stats: COMPLETE
                   Filter Operator
                     predicate: (value = 1) (type: boolean)
-                    Statistics: Num rows: 5 Data size: 940 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 1 Data size: 89 Basic stats: COMPLETE Column stats: COMPLETE
                     Select Operator
                       expressions: ROW__ID (type: struct<writeid:bigint,bucketid:int,rowid:bigint>), i (type: int)
                       outputColumnNames: _col0, _col3
-                      Statistics: Num rows: 5 Data size: 940 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 1 Data size: 88 Basic stats: COMPLETE Column stats: COMPLETE
                       Reduce Output Operator
                         key expressions: _col0 (type: struct<writeid:bigint,bucketid:int,rowid:bigint>)
                         sort order: +
                         Map-reduce partition columns: UDFToInteger(_col0) (type: int)
-                        Statistics: Num rows: 5 Data size: 940 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 1 Data size: 88 Basic stats: COMPLETE Column stats: COMPLETE
                         value expressions: _col3 (type: int)
             Execution mode: vectorized, llap
             LLAP IO: may be used (ACID table)
@@ -1835,10 +1835,10 @@ STAGE PLANS:
               Select Operator
                 expressions: KEY.reducesinkkey0 (type: struct<writeid:bigint,bucketid:int,rowid:bigint>), 1 (type: int), null (type: string), VALUE._col0 (type: int)
                 outputColumnNames: _col0, _col1, _col2, _col3
-                Statistics: Num rows: 5 Data size: 940 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 1 Data size: 168 Basic stats: COMPLETE Column stats: COMPLETE
                 File Output Operator
                   compressed: false
-                  Statistics: Num rows: 5 Data size: 940 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 1 Data size: 168 Basic stats: COMPLETE Column stats: COMPLETE
                   table:
                       input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
                       output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
@@ -2371,15 +2371,15 @@ STAGE PLANS:
                 TableScan
                   alias: t
                   filterExpr: enforce_constraint(key is not null) (type: boolean)
-                  Statistics: Num rows: 20 Data size: 80 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
                   Filter Operator
                     predicate: enforce_constraint(key is not null) (type: boolean)
-                    Statistics: Num rows: 10 Data size: 40 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
                     Reduce Output Operator
                       key expressions: key (type: int)
                       sort order: +
                       Map-reduce partition columns: key (type: int)
-                      Statistics: Num rows: 10 Data size: 40 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
             Execution mode: vectorized, llap
             LLAP IO: may be used (ACID table)
         Map 5 
@@ -2408,18 +2408,18 @@ STAGE PLANS:
                   0 key (type: int)
                   1 key (type: int)
                 outputColumnNames: _col0, _col6
-                Statistics: Num rows: 11 Data size: 44 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
                 Filter Operator
                   predicate: _col0 is null (type: boolean)
-                  Statistics: Num rows: 5 Data size: 20 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
                   Select Operator
                     expressions: _col6 (type: int)
                     outputColumnNames: _col0
-                    Statistics: Num rows: 5 Data size: 20 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 1 Data size: 94 Basic stats: COMPLETE Column stats: COMPLETE
                     Reduce Output Operator
                       sort order: 
                       Map-reduce partition columns: null (type: string)
-                      Statistics: Num rows: 5 Data size: 20 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 1 Data size: 94 Basic stats: COMPLETE Column stats: COMPLETE
                       value expressions: _col0 (type: int)
         Reducer 3 
             Execution mode: llap
@@ -2427,10 +2427,10 @@ STAGE PLANS:
               Select Operator
                 expressions: VALUE._col0 (type: int), 'a1' (type: string), null (type: string)
                 outputColumnNames: _col0, _col1, _col2
-                Statistics: Num rows: 5 Data size: 20 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 1 Data size: 174 Basic stats: COMPLETE Column stats: COMPLETE
                 File Output Operator
                   compressed: false
-                  Statistics: Num rows: 5 Data size: 20 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 1 Data size: 174 Basic stats: COMPLETE Column stats: COMPLETE
                   table:
                       input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
                       output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
@@ -2440,15 +2440,15 @@ STAGE PLANS:
                 Select Operator
                   expressions: _col0 (type: int), 'a1' (type: string), null (type: string)
                   outputColumnNames: key, a1, value
-                  Statistics: Num rows: 5 Data size: 20 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 1 Data size: 174 Basic stats: COMPLETE Column stats: COMPLETE
                   Group By Operator
                     aggregations: compute_stats(key, 'hll'), compute_stats(a1, 'hll'), compute_stats(value, 'hll')
                     mode: hash
                     outputColumnNames: _col0, _col1, _col2
-                    Statistics: Num rows: 1 Data size: 1304 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 1 Data size: 1304 Basic stats: COMPLETE Column stats: COMPLETE
                     Reduce Output Operator
                       sort order: 
-                      Statistics: Num rows: 1 Data size: 1304 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 1 Data size: 1304 Basic stats: COMPLETE Column stats: COMPLETE
                       value expressions: _col0 (type: struct<columntype:string,min:bigint,max:bigint,countnulls:bigint,bitvector:binary>), _col1 (type: struct<columntype:string,maxlength:bigint,sumlength:bigint,count:bigint,countnulls:bigint,bitvector:binary>), _col2 (type: struct<columntype:string,maxlength:bigint,sumlength:bigint,count:bigint,countnulls:bigint,bitvector:binary>)
         Reducer 4 
             Execution mode: llap
@@ -2457,10 +2457,10 @@ STAGE PLANS:
                 aggregations: compute_stats(VALUE._col0), compute_stats(VALUE._col1), compute_stats(VALUE._col2)
                 mode: mergepartial
                 outputColumnNames: _col0, _col1, _col2
-                Statistics: Num rows: 1 Data size: 1320 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 1 Data size: 1320 Basic stats: COMPLETE Column stats: COMPLETE
                 File Output Operator
                   compressed: false
-                  Statistics: Num rows: 1 Data size: 1320 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 1 Data size: 1320 Basic stats: COMPLETE Column stats: COMPLETE
                   table:
                       input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                       output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@@ -2569,12 +2569,12 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: t
-                  Statistics: Num rows: 20 Data size: 3760 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 1 Data size: 94 Basic stats: COMPLETE Column stats: COMPLETE
                   Reduce Output Operator
                     key expressions: key (type: int)
                     sort order: +
                     Map-reduce partition columns: key (type: int)
-                    Statistics: Num rows: 20 Data size: 3760 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 1 Data size: 94 Basic stats: COMPLETE Column stats: COMPLETE
                     value expressions: value (type: string), ROW__ID (type: struct<writeid:bigint,bucketid:int,rowid:bigint>)
             Execution mode: vectorized, llap
             LLAP IO: may be used (ACID table)
@@ -2601,62 +2601,62 @@ STAGE PLANS:
                   0 key (type: int)
                   1 key (type: int)
                 outputColumnNames: _col0, _col2, _col5, _col6, _col7
-                Statistics: Num rows: 22 Data size: 4136 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 2 Data size: 432 Basic stats: COMPLETE Column stats: COMPLETE
                 Filter Operator
                   predicate: ((_col0 = _col6) and (_col6 < 3)) (type: boolean)
-                  Statistics: Num rows: 3 Data size: 564 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 1 Data size: 261 Basic stats: COMPLETE Column stats: COMPLETE
                   Select Operator
                     expressions: _col5 (type: struct<writeid:bigint,bucketid:int,rowid:bigint>)
                     outputColumnNames: _col0
-                    Statistics: Num rows: 3 Data size: 564 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 1 Data size: 76 Basic stats: COMPLETE Column stats: COMPLETE
                     Reduce Output Operator
                       key expressions: _col0 (type: struct<writeid:bigint,bucketid:int,rowid:bigint>)
                       sort order: +
                       Map-reduce partition columns: UDFToInteger(_col0) (type: int)
-                      Statistics: Num rows: 3 Data size: 564 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 1 Data size: 76 Basic stats: COMPLETE Column stats: COMPLETE
                 Filter Operator
                   predicate: ((_col0 = _col6) and (_col6 > 3) and (_col6 >= 3) and enforce_constraint(_col0 is not null)) (type: boolean)
-                  Statistics: Num rows: 1 Data size: 188 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 1 Data size: 261 Basic stats: COMPLETE Column stats: COMPLETE
                   Select Operator
                     expressions: _col5 (type: struct<writeid:bigint,bucketid:int,rowid:bigint>), _col0 (type: int), _col2 (type: string)
                     outputColumnNames: _col0, _col1, _col3
-                    Statistics: Num rows: 1 Data size: 188 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 1 Data size: 256 Basic stats: COMPLETE Column stats: COMPLETE
                     Reduce Output Operator
                       key expressions: _col0 (type: struct<writeid:bigint,bucketid:int,rowid:bigint>)
                       sort order: +
                       Map-reduce partition columns: UDFToInteger(_col0) (type: int)
-                      Statistics: Num rows: 1 Data size: 188 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 1 Data size: 256 Basic stats: COMPLETE Column stats: COMPLETE
                       value expressions: _col1 (type: int), _col3 (type: string)
                 Filter Operator
                   predicate: (_col0 = _col6) (type: boolean)
-                  Statistics: Num rows: 11 Data size: 2068 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 1 Data size: 261 Basic stats: COMPLETE Column stats: COMPLETE
                   Select Operator
                     expressions: _col5 (type: struct<writeid:bigint,bucketid:int,rowid:bigint>)
                     outputColumnNames: _col5
-                    Statistics: Num rows: 11 Data size: 2068 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 1 Data size: 261 Basic stats: COMPLETE Column stats: COMPLETE
                     Group By Operator
                       aggregations: count()
                       keys: _col5 (type: struct<writeid:bigint,bucketid:int,rowid:bigint>)
                       mode: hash
                       outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 11 Data size: 2068 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 1 Data size: 84 Basic stats: COMPLETE Column stats: COMPLETE
                       Reduce Output Operator
                         key expressions: _col0 (type: struct<writeid:bigint,bucketid:int,rowid:bigint>)
                         sort order: +
                         Map-reduce partition columns: _col0 (type: struct<writeid:bigint,bucketid:int,rowid:bigint>)
-                        Statistics: Num rows: 11 Data size: 2068 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 1 Data size: 84 Basic stats: COMPLETE Column stats: COMPLETE
                         value expressions: _col1 (type: bigint)
                 Filter Operator
                   predicate: (_col0 is null and enforce_constraint(_col6 is not null)) (type: boolean)
-                  Statistics: Num rows: 5 Data size: 940 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 1 Data size: 261 Basic stats: COMPLETE Column stats: COMPLETE
                   Select Operator
                     expressions: _col6 (type: int), _col7 (type: string)
                     outputColumnNames: _col0, _col1
-                    Statistics: Num rows: 5 Data size: 940 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 1 Data size: 95 Basic stats: COMPLETE Column stats: COMPLETE
                     Reduce Output Operator
                       sort order: 
                       Map-reduce partition columns: null (type: string)
-                      Statistics: Num rows: 5 Data size: 940 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 1 Data size: 95 Basic stats: COMPLETE Column stats: COMPLETE
                       value expressions: _col0 (type: int), _col1 (type: string)
         Reducer 3 
             Execution mode: vectorized, llap
@@ -2664,10 +2664,10 @@ STAGE PLANS:
               Select Operator
                 expressions: KEY.reducesinkkey0 (type: struct<writeid:bigint,bucketid:int,rowid:bigint>)
                 outputColumnNames: _col0
-                Statistics: Num rows: 3 Data size: 564 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 1 Data size: 76 Basic stats: COMPLETE Column stats: COMPLETE
                 File Output Operator
                   compressed: false
-                  Statistics: Num rows: 3 Data size: 564 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 1 Data size: 76 Basic stats: COMPLETE Column stats: COMPLETE
                   table:
                       input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
                       output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
@@ -2680,10 +2680,10 @@ STAGE PLANS:
               Select Operator
                 expressions: KEY.reducesinkkey0 (type: struct<writeid:bigint,bucketid:int,rowid:bigint>), VALUE._col0 (type: int), 'a1' (type: string), VALUE._col1 (type: string)
                 outputColumnNames: _col0, _col1, _col2, _col3
-                Statistics: Num rows: 1 Data size: 188 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 1 Data size: 256 Basic stats: COMPLETE Column stats: COMPLETE
                 File Output Operator
                   compressed: false
-                  Statistics: Num rows: 1 Data size: 188 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 1 Data size: 256 Basic stats: COMPLETE Column stats: COMPLETE
                   table:
                       input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
                       output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
@@ -2698,17 +2698,17 @@ STAGE PLANS:
                 keys: KEY._col0 (type: struct<writeid:bigint,bucketid:int,rowid:bigint>)
                 mode: mergepartial
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 5 Data size: 940 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 1 Data size: 84 Basic stats: COMPLETE Column stats: COMPLETE
                 Filter Operator
                   predicate: (_col1 > 1L) (type: boolean)
-                  Statistics: Num rows: 1 Data size: 188 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 1 Data size: 84 Basic stats: COMPLETE Column stats: COMPLETE
                   Select Operator
                     expressions: cardinality_violation(_col0) (type: int)
                     outputColumnNames: _col0
-                    Statistics: Num rows: 1 Data size: 188 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
                     File Output Operator
                       compressed: false
-                      Statistics: Num rows: 1 Data size: 188 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
                       table:
                           input format: org.apache.hadoop.mapred.TextInputFormat
                           output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -2717,19 +2717,19 @@ STAGE PLANS:
                     Select Operator
                       expressions: _col0 (type: int)
                       outputColumnNames: val
-                      Statistics: Num rows: 1 Data size: 188 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
                       Group By Operator
                         aggregations: compute_stats(val, 'hll')
                         mode: complete
                         outputColumnNames: _col0
-                        Statistics: Num rows: 1 Data size: 432 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 1 Data size: 424 Basic stats: COMPLETE Column stats: COMPLETE
                         Select Operator
                           expressions: _col0 (type: struct<columntype:string,min:bigint,max:bigint,countnulls:bigint,numdistinctvalues:bigint,ndvbitvector:binary>)
                           outputColumnNames: _col0
-                          Statistics: Num rows: 1 Data size: 432 Basic stats: COMPLETE Column stats: NONE
+                          Statistics: Num rows: 1 Data size: 424 Basic stats: COMPLETE Column stats: COMPLETE
                           File Output Operator
                             compressed: false
-                            Statistics: Num rows: 1 Data size: 432 Basic stats: COMPLETE Column stats: NONE
+                            Statistics: Num rows: 1 Data size: 424 Basic stats: COMPLETE Column stats: COMPLETE
                             table:
                                 input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                                 output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@@ -2740,10 +2740,10 @@ STAGE PLANS:
               Select Operator
                 expressions: VALUE._col0 (type: int), VALUE._col1 (type: string), null (type: string)
                 outputColumnNames: _col0, _col1, _col2
-                Statistics: Num rows: 5 Data size: 940 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 1 Data size: 175 Basic stats: COMPLETE Column stats: COMPLETE
                 File Output Operator
                   compressed: false
-                  Statistics: Num rows: 5 Data size: 940 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 1 Data size: 175 Basic stats: COMPLETE Column stats: COMPLETE
                   table:
                       input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
                       output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
@@ -2753,15 +2753,15 @@ STAGE PLANS:
                 Select Operator
                   expressions: _col0 (type: int), _col1 (type: string), null (type: string)
                   outputColumnNames: key, a1, value
-                  Statistics: Num rows: 5 Data size: 940 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 1 Data size: 175 Basic stats: COMPLETE Column stats: COMPLETE
                   Group By Operator
                     aggregations: compute_stats(key, 'hll'), compute_stats(a1, 'hll'), compute_stats(value, 'hll')
                     mode: hash
                     outputColumnNames: _col0, _col1, _col2
-                    Statistics: Num rows: 1 Data size: 1304 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 1 Data size: 1304 Basic stats: COMPLETE Column stats: COMPLETE
                     Reduce Output Operator
                       sort order: 
-                      Statistics: Num rows: 1 Data size: 1304 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 1 Data size: 1304 Basic stats: COMPLETE Column stats: COMPLETE
                       value expressions: _col0 (type: struct<columntype:string,min:bigint,max:bigint,countnulls:bigint,bitvector:binary>), _col1 (type: struct<columntype:string,maxlength:bigint,sumlength:bigint,count:bigint,countnulls:bigint,bitvector:binary>), _col2 (type: struct<columntype:string,maxlength:bigint,sumlength:bigint,count:bigint,countnulls:bigint,bitvector:binary>)
         Reducer 7 
             Execution mode: llap
@@ -2770,10 +2770,10 @@ STAGE PLANS:
                 aggregations: compute_stats(VALUE._col0), compute_stats(VALUE._col1), compute_stats(VALUE._col2)
                 mode: mergepartial
                 outputColumnNames: _col0, _col1, _col2
-                Statistics: Num rows: 1 Data size: 1320 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 1 Data size: 1320 Basic stats: COMPLETE Column stats: COMPLETE
                 File Output Operator
                   compressed: false
-                  Statistics: Num rows: 1 Data size: 1320 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 1 Data size: 1320 Basic stats: COMPLETE Column stats: COMPLETE
                   table:
                       input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                       output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat