Posted to commits@hive.apache.org by we...@apache.org on 2017/05/08 22:17:18 UTC

[01/50] [abbrv] hive git commit: HIVE-15396: Basic Stats are not collected for managed tables with LOCATION specified (Sahil Takiar, reviewed by Pengcheng Xiong)

Repository: hive
Updated Branches:
  refs/heads/hive-14535 ed64a74e8 -> 1ceaf357b


HIVE-15396: Basic Stats are not collected for managed tables with LOCATION specified (Sahil Takiar, reviewed by Pengcheng Xiong)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/1af98024
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/1af98024
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/1af98024

Branch: refs/heads/hive-14535
Commit: 1af980242ab759795f498486a1124460cde98409
Parents: 6af5124
Author: Pengcheng Xiong <px...@hortonworks.com>
Authored: Tue May 2 14:25:03 2017 -0700
Committer: Pengcheng Xiong <px...@hortonworks.com>
Committed: Tue May 2 14:25:03 2017 -0700

----------------------------------------------------------------------
 .../clientpositive/insert_into_table.q.out      | 27 ++++++++++++++++++++
 .../write_final_output_blobstore.q.out          | 20 +++++++++++++++
 .../apache/hadoop/hive/ql/metadata/Table.java   | 12 +++++++++
 .../hadoop/hive/ql/plan/CreateTableDesc.java    |  3 ++-
 .../test/results/clientnegative/external1.q.out |  2 +-
 .../clientpositive/default_file_format.q.out    | 10 ++++++++
 .../results/clientpositive/deleteAnalyze.q.out  | 20 +++++++++------
 .../clientpositive/llap/deleteAnalyze.q.out     | 16 +++++++-----
 .../temp_table_display_colstats_tbllvl.q.out    |  3 +++
 9 files changed, 97 insertions(+), 16 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/1af98024/itests/hive-blobstore/src/test/results/clientpositive/insert_into_table.q.out
----------------------------------------------------------------------
diff --git a/itests/hive-blobstore/src/test/results/clientpositive/insert_into_table.q.out b/itests/hive-blobstore/src/test/results/clientpositive/insert_into_table.q.out
index 4ed53e5..663a572 100644
--- a/itests/hive-blobstore/src/test/results/clientpositive/insert_into_table.q.out
+++ b/itests/hive-blobstore/src/test/results/clientpositive/insert_into_table.q.out
@@ -71,6 +71,7 @@ STAGE PLANS:
                     input format: org.apache.hadoop.mapred.TextInputFormat
                     output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                     properties:
+                      COLUMN_STATS_ACCURATE {"BASIC_STATS":"true"}
                       bucket_count -1
                       column.name.delimiter ,
                       columns id
@@ -80,6 +81,8 @@ STAGE PLANS:
                       location ### test.blobstore.path ###/table1
                       name default.table1
                       numFiles 2
+                      numRows 2
+                      rawDataSize 2
                       serialization.ddl struct table1 { i32 id}
                       serialization.format 1
                       serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -142,6 +145,7 @@ STAGE PLANS:
               input format: org.apache.hadoop.mapred.TextInputFormat
               output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
               properties:
+                COLUMN_STATS_ACCURATE {"BASIC_STATS":"true"}
                 bucket_count -1
                 column.name.delimiter ,
                 columns id
@@ -151,6 +155,8 @@ STAGE PLANS:
                 location ### test.blobstore.path ###/table1
                 name default.table1
                 numFiles 2
+                numRows 2
+                rawDataSize 2
                 serialization.ddl struct table1 { i32 id}
                 serialization.format 1
                 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -177,6 +183,7 @@ STAGE PLANS:
                   input format: org.apache.hadoop.mapred.TextInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                   properties:
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true"}
                     bucket_count -1
                     column.name.delimiter ,
                     columns id
@@ -186,6 +193,8 @@ STAGE PLANS:
                     location ### test.blobstore.path ###/table1
                     name default.table1
                     numFiles 2
+                    numRows 2
+                    rawDataSize 2
                     serialization.ddl struct table1 { i32 id}
                     serialization.format 1
                     serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -205,6 +214,7 @@ STAGE PLANS:
             input format: org.apache.hadoop.mapred.TextInputFormat
             output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
             properties:
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true"}
               bucket_count -1
               column.name.delimiter ,
               columns id
@@ -214,6 +224,8 @@ STAGE PLANS:
               location ### test.blobstore.path ###/table1
               name default.table1
               numFiles 2
+              numRows 2
+              rawDataSize 2
               serialization.ddl struct table1 { i32 id}
               serialization.format 1
               serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -224,6 +236,7 @@ STAGE PLANS:
               input format: org.apache.hadoop.mapred.TextInputFormat
               output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
               properties:
+                COLUMN_STATS_ACCURATE {"BASIC_STATS":"true"}
                 bucket_count -1
                 column.name.delimiter ,
                 columns id
@@ -233,6 +246,8 @@ STAGE PLANS:
                 location ### test.blobstore.path ###/table1
                 name default.table1
                 numFiles 2
+                numRows 2
+                rawDataSize 2
                 serialization.ddl struct table1 { i32 id}
                 serialization.format 1
                 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -253,6 +268,7 @@ STAGE PLANS:
               input format: org.apache.hadoop.mapred.TextInputFormat
               output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
               properties:
+                COLUMN_STATS_ACCURATE {"BASIC_STATS":"true"}
                 bucket_count -1
                 column.name.delimiter ,
                 columns id
@@ -262,6 +278,8 @@ STAGE PLANS:
                 location ### test.blobstore.path ###/table1
                 name default.table1
                 numFiles 2
+                numRows 2
+                rawDataSize 2
                 serialization.ddl struct table1 { i32 id}
                 serialization.format 1
                 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -284,6 +302,7 @@ STAGE PLANS:
                   input format: org.apache.hadoop.mapred.TextInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                   properties:
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true"}
                     bucket_count -1
                     column.name.delimiter ,
                     columns id
@@ -293,6 +312,8 @@ STAGE PLANS:
                     location ### test.blobstore.path ###/table1
                     name default.table1
                     numFiles 2
+                    numRows 2
+                    rawDataSize 2
                     serialization.ddl struct table1 { i32 id}
                     serialization.format 1
                     serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -312,6 +333,7 @@ STAGE PLANS:
             input format: org.apache.hadoop.mapred.TextInputFormat
             output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
             properties:
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true"}
               bucket_count -1
               column.name.delimiter ,
               columns id
@@ -321,6 +343,8 @@ STAGE PLANS:
               location ### test.blobstore.path ###/table1
               name default.table1
               numFiles 2
+              numRows 2
+              rawDataSize 2
               serialization.ddl struct table1 { i32 id}
               serialization.format 1
               serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
@@ -331,6 +355,7 @@ STAGE PLANS:
               input format: org.apache.hadoop.mapred.TextInputFormat
               output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
               properties:
+                COLUMN_STATS_ACCURATE {"BASIC_STATS":"true"}
                 bucket_count -1
                 column.name.delimiter ,
                 columns id
@@ -340,6 +365,8 @@ STAGE PLANS:
                 location ### test.blobstore.path ###/table1
                 name default.table1
                 numFiles 2
+                numRows 2
+                rawDataSize 2
                 serialization.ddl struct table1 { i32 id}
                 serialization.format 1
                 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe

http://git-wip-us.apache.org/repos/asf/hive/blob/1af98024/itests/hive-blobstore/src/test/results/clientpositive/write_final_output_blobstore.q.out
----------------------------------------------------------------------
diff --git a/itests/hive-blobstore/src/test/results/clientpositive/write_final_output_blobstore.q.out b/itests/hive-blobstore/src/test/results/clientpositive/write_final_output_blobstore.q.out
index 8a90a9e..46bfef5 100644
--- a/itests/hive-blobstore/src/test/results/clientpositive/write_final_output_blobstore.q.out
+++ b/itests/hive-blobstore/src/test/results/clientpositive/write_final_output_blobstore.q.out
@@ -192,6 +192,7 @@ STAGE PLANS:
                 input format: org.apache.hadoop.mapred.TextInputFormat
                 output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                 properties:
+                  COLUMN_STATS_ACCURATE {"BASIC_STATS":"true"}
                   bucket_count -1
                   column.name.delimiter ,
                   columns key
@@ -200,9 +201,13 @@ STAGE PLANS:
 #### A masked pattern was here ####
                   location ### test.blobstore.path ###/write_final_output_blobstore
                   name default.blobstore_table
+                  numFiles 0
+                  numRows 0
+                  rawDataSize 0
                   serialization.ddl struct blobstore_table { i32 key}
                   serialization.format 1
                   serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                  totalSize 0
 #### A masked pattern was here ####
                 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                 name: default.blobstore_table
@@ -219,6 +224,7 @@ STAGE PLANS:
               input format: org.apache.hadoop.mapred.TextInputFormat
               output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
               properties:
+                COLUMN_STATS_ACCURATE {"BASIC_STATS":"true"}
                 bucket_count -1
                 column.name.delimiter ,
                 columns key
@@ -227,9 +233,13 @@ STAGE PLANS:
 #### A masked pattern was here ####
                 location ### test.blobstore.path ###/write_final_output_blobstore
                 name default.blobstore_table
+                numFiles 0
+                numRows 0
+                rawDataSize 0
                 serialization.ddl struct blobstore_table { i32 key}
                 serialization.format 1
                 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                totalSize 0
 #### A masked pattern was here ####
               serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
               name: default.blobstore_table
@@ -406,6 +416,7 @@ STAGE PLANS:
                 input format: org.apache.hadoop.mapred.TextInputFormat
                 output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                 properties:
+                  COLUMN_STATS_ACCURATE {"BASIC_STATS":"true"}
                   bucket_count -1
                   column.name.delimiter ,
                   columns key
@@ -414,9 +425,13 @@ STAGE PLANS:
 #### A masked pattern was here ####
                   location ### test.blobstore.path ###/write_final_output_blobstore
                   name default.blobstore_table
+                  numFiles 0
+                  numRows 0
+                  rawDataSize 0
                   serialization.ddl struct blobstore_table { i32 key}
                   serialization.format 1
                   serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                  totalSize 0
 #### A masked pattern was here ####
                 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                 name: default.blobstore_table
@@ -433,6 +448,7 @@ STAGE PLANS:
               input format: org.apache.hadoop.mapred.TextInputFormat
               output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
               properties:
+                COLUMN_STATS_ACCURATE {"BASIC_STATS":"true"}
                 bucket_count -1
                 column.name.delimiter ,
                 columns key
@@ -441,9 +457,13 @@ STAGE PLANS:
 #### A masked pattern was here ####
                 location ### test.blobstore.path ###/write_final_output_blobstore
                 name default.blobstore_table
+                numFiles 0
+                numRows 0
+                rawDataSize 0
                 serialization.ddl struct blobstore_table { i32 key}
                 serialization.format 1
                 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                totalSize 0
 #### A masked pattern was here ####
               serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
               name: default.blobstore_table

http://git-wip-us.apache.org/repos/asf/hive/blob/1af98024/ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java
index 3122689..a53f774 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java
@@ -18,6 +18,7 @@
 
 package org.apache.hadoop.hive.ql.metadata;
 
+import java.io.IOException;
 import java.io.Serializable;
 import java.util.ArrayList;
 import java.util.Arrays;
@@ -29,6 +30,7 @@ import java.util.Map;
 import java.util.Properties;
 import java.util.Set;
 
+import com.google.common.base.Preconditions;
 import org.apache.commons.lang3.StringUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileStatus;
@@ -946,6 +948,16 @@ public class Table implements Serializable {
     }
   }
 
+  public boolean isEmpty() throws HiveException {
+    Preconditions.checkNotNull(getPath());
+    try {
+      FileSystem fs = FileSystem.get(getPath().toUri(), SessionState.getSessionConf());
+      return !fs.exists(getPath()) || fs.listStatus(getPath(), FileUtils.HIDDEN_FILES_PATH_FILTER).length == 0;
+    } catch (IOException e) {
+      throw new HiveException(e);
+    }
+  }
+
   public boolean isTemporary() {
     return tTable.isTemporary();
   }
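
The new Table.isEmpty() above treats a table as empty when its location is missing or contains only hidden files. A minimal standalone sketch of the same check outside the Table class (the class and method names below are illustrative, not part of the patch):

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathFilter;

public class EmptyLocationCheck {

  // Approximation of FileUtils.HIDDEN_FILES_PATH_FILTER: ignore entries whose
  // names start with '.' or '_' (hidden files, staging directories).
  private static final PathFilter VISIBLE_FILES = p -> {
    String name = p.getName();
    return !name.startsWith(".") && !name.startsWith("_");
  };

  // True when the directory does not exist or holds no visible entries.
  public static boolean isEmptyLocation(Path location, Configuration conf) throws IOException {
    FileSystem fs = FileSystem.get(location.toUri(), conf);
    if (!fs.exists(location)) {
      return true;
    }
    FileStatus[] visible = fs.listStatus(location, VISIBLE_FILES);
    return visible.length == 0;
  }
}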

http://git-wip-us.apache.org/repos/asf/hive/blob/1af98024/ql/src/java/org/apache/hadoop/hive/ql/plan/CreateTableDesc.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/CreateTableDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/CreateTableDesc.java
index 4f614a8..d971c73 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/plan/CreateTableDesc.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/CreateTableDesc.java
@@ -834,7 +834,8 @@ public class CreateTableDesc extends DDLDesc implements Serializable {
         }
       }
     }
-    if (getLocation() == null && !this.isCTAS) {
+
+    if (!this.isCTAS && (tbl.getPath() == null || (tbl.isEmpty() && !isExternal()))) {
       if (!tbl.isPartitioned() && conf.getBoolVar(HiveConf.ConfVars.HIVESTATSAUTOGATHER)) {
         StatsSetupConst.setBasicStatsStateForCreateTable(tbl.getTTable().getParameters(),
             StatsSetupConst.TRUE);
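
With the rewritten guard, a plain CREATE TABLE is still seeded with zeroed basic stats even when LOCATION is specified, provided the table is managed, unpartitioned, and its directory is empty; CTAS and non-empty or external locations are left alone. A condensed sketch of that decision as a pure function (all names here are illustrative stand-ins for the fields used in CreateTableDesc, not the actual API):

public final class BasicStatsSeedRule {

  private BasicStatsSeedRule() {
  }

  // Mirrors the patched condition: seed stats only for non-CTAS, unpartitioned
  // tables that either have no location yet, or are managed tables whose
  // location directory is empty.
  public static boolean shouldSeedBasicStats(boolean isCtas,
                                             boolean hasLocation,
                                             boolean locationIsEmpty,
                                             boolean isExternal,
                                             boolean isPartitioned,
                                             boolean statsAutoGather) {
    if (isCtas || isPartitioned || !statsAutoGather) {
      return false;
    }
    return !hasLocation || (locationIsEmpty && !isExternal);
  }
}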

http://git-wip-us.apache.org/repos/asf/hive/blob/1af98024/ql/src/test/results/clientnegative/external1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientnegative/external1.q.out b/ql/src/test/results/clientnegative/external1.q.out
index c583627..661d669 100644
--- a/ql/src/test/results/clientnegative/external1.q.out
+++ b/ql/src/test/results/clientnegative/external1.q.out
@@ -3,4 +3,4 @@ PREHOOK: type: CREATETABLE
 #### A masked pattern was here ####
 PREHOOK: Output: database:default
 PREHOOK: Output: default@external1
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. MetaException(message:Got exception: java.io.IOException No FileSystem for scheme: invalidscheme)
+FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. java.io.IOException: No FileSystem for scheme: invalidscheme

http://git-wip-us.apache.org/repos/asf/hive/blob/1af98024/ql/src/test/results/clientpositive/default_file_format.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/default_file_format.q.out b/ql/src/test/results/clientpositive/default_file_format.q.out
index ef0ca52..55401c0 100644
--- a/ql/src/test/results/clientpositive/default_file_format.q.out
+++ b/ql/src/test/results/clientpositive/default_file_format.q.out
@@ -201,6 +201,11 @@ Retention:          	0
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
 Table Parameters:	 	 
+	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\"}
+	numFiles            	0                   
+	numRows             	0                   
+	rawDataSize         	0                   
+	totalSize           	0                   
 #### A masked pattern was here ####
 	 	 
 # Storage Information	 	 
@@ -498,6 +503,11 @@ Retention:          	0
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
 Table Parameters:	 	 
+	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\"}
+	numFiles            	0                   
+	numRows             	0                   
+	rawDataSize         	0                   
+	totalSize           	0                   
 #### A masked pattern was here ####
 	 	 
 # Storage Information	 	 

http://git-wip-us.apache.org/repos/asf/hive/blob/1af98024/ql/src/test/results/clientpositive/deleteAnalyze.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/deleteAnalyze.q.out b/ql/src/test/results/clientpositive/deleteAnalyze.q.out
index 4382522..1bae859 100644
--- a/ql/src/test/results/clientpositive/deleteAnalyze.q.out
+++ b/ql/src/test/results/clientpositive/deleteAnalyze.q.out
@@ -48,7 +48,10 @@ Retention:          	0
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
 Table Parameters:	 	 
+	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\"}
 	numFiles            	1                   
+	numRows             	2                   
+	rawDataSize         	634                 
 	totalSize           	578                 
 #### A masked pattern was here ####
 	 	 
@@ -72,6 +75,7 @@ POSTHOOK: Input: default@testdeci2
 # col_name            	data_type           	min                 	max                 	num_nulls           	distinct_count      	avg_col_len         	max_col_len         	num_trues           	num_falses          	comment             
 	 	 	 	 	 	 	 	 	 	 
 amount              	decimal(10,3)       	                    	                    	                    	                    	                    	                    	                    	                    	from deserializer   
+COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\"}	 	 	 	 	 	 	 	 	 
 PREHOOK: query: analyze table testdeci2 compute statistics for columns
 PREHOOK: type: QUERY
 PREHOOK: Input: default@testdeci2
@@ -112,23 +116,23 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: s
-            Statistics: Num rows: 5 Data size: 440 Basic stats: COMPLETE Column stats: COMPLETE
+            Statistics: Num rows: 2 Data size: 176 Basic stats: COMPLETE Column stats: COMPLETE
             Filter Operator
               predicate: item is not null (type: boolean)
-              Statistics: Num rows: 5 Data size: 440 Basic stats: COMPLETE Column stats: COMPLETE
+              Statistics: Num rows: 2 Data size: 176 Basic stats: COMPLETE Column stats: COMPLETE
               Select Operator
                 expressions: id (type: int), item (type: string)
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 5 Data size: 440 Basic stats: COMPLETE Column stats: COMPLETE
+                Statistics: Num rows: 2 Data size: 176 Basic stats: COMPLETE Column stats: COMPLETE
                 Reduce Output Operator
                   key expressions: _col1 (type: string)
                   sort order: +
                   Map-reduce partition columns: _col1 (type: string)
-                  Statistics: Num rows: 5 Data size: 440 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 2 Data size: 176 Basic stats: COMPLETE Column stats: COMPLETE
                   value expressions: _col0 (type: int)
           TableScan
             alias: d
-            Statistics: Num rows: 1 Data size: 312 Basic stats: COMPLETE Column stats: COMPLETE
+            Statistics: Num rows: 2 Data size: 624 Basic stats: COMPLETE Column stats: COMPLETE
             Filter Operator
               predicate: ((id = 2) and item is not null) (type: boolean)
               Statistics: Num rows: 1 Data size: 312 Basic stats: COMPLETE Column stats: COMPLETE
@@ -150,14 +154,14 @@ STAGE PLANS:
             0 _col1 (type: string)
             1 _col3 (type: string)
           outputColumnNames: _col0, _col3, _col4
-          Statistics: Num rows: 5 Data size: 1140 Basic stats: COMPLETE Column stats: COMPLETE
+          Statistics: Num rows: 2 Data size: 456 Basic stats: COMPLETE Column stats: COMPLETE
           Select Operator
             expressions: _col0 (type: int), COALESCE(_col3,0) (type: decimal(13,3)), COALESCE(_col4,0) (type: decimal(13,3))
             outputColumnNames: _col0, _col1, _col2
-            Statistics: Num rows: 5 Data size: 1140 Basic stats: COMPLETE Column stats: COMPLETE
+            Statistics: Num rows: 2 Data size: 456 Basic stats: COMPLETE Column stats: COMPLETE
             File Output Operator
               compressed: false
-              Statistics: Num rows: 5 Data size: 1140 Basic stats: COMPLETE Column stats: COMPLETE
+              Statistics: Num rows: 2 Data size: 456 Basic stats: COMPLETE Column stats: COMPLETE
               table:
                   input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat

http://git-wip-us.apache.org/repos/asf/hive/blob/1af98024/ql/src/test/results/clientpositive/llap/deleteAnalyze.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/deleteAnalyze.q.out b/ql/src/test/results/clientpositive/llap/deleteAnalyze.q.out
index 98ba6af..5db87d9 100644
--- a/ql/src/test/results/clientpositive/llap/deleteAnalyze.q.out
+++ b/ql/src/test/results/clientpositive/llap/deleteAnalyze.q.out
@@ -48,7 +48,10 @@ Retention:          	0
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
 Table Parameters:	 	 
+	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\"}
 	numFiles            	1                   
+	numRows             	2                   
+	rawDataSize         	634                 
 	totalSize           	578                 
 #### A masked pattern was here ####
 	 	 
@@ -72,6 +75,7 @@ POSTHOOK: Input: default@testdeci2
 # col_name            	data_type           	min                 	max                 	num_nulls           	distinct_count      	avg_col_len         	max_col_len         	num_trues           	num_falses          	comment             
 	 	 	 	 	 	 	 	 	 	 
 amount              	decimal(10,3)       	                    	                    	                    	                    	                    	                    	                    	                    	from deserializer   
+COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\"}	 	 	 	 	 	 	 	 	 
 PREHOOK: query: analyze table testdeci2 compute statistics for columns
 PREHOOK: type: QUERY
 PREHOOK: Input: default@testdeci2
@@ -117,18 +121,18 @@ Stage-0
     Stage-1
       Reducer 2 llap
       File Output Operator [FS_10]
-        Select Operator [SEL_9] (rows=5 width=228)
+        Select Operator [SEL_9] (rows=2 width=228)
           Output:["_col0","_col1","_col2"]
-          Merge Join Operator [MERGEJOIN_15] (rows=5 width=228)
+          Merge Join Operator [MERGEJOIN_15] (rows=2 width=228)
             Conds:RS_6._col1=RS_7._col3(Inner),Output:["_col0","_col3","_col4"]
           <-Map 1 [SIMPLE_EDGE] llap
             SHUFFLE [RS_6]
               PartitionCols:_col1
-              Select Operator [SEL_2] (rows=5 width=88)
+              Select Operator [SEL_2] (rows=2 width=88)
                 Output:["_col0","_col1"]
-                Filter Operator [FIL_13] (rows=5 width=88)
+                Filter Operator [FIL_13] (rows=2 width=88)
                   predicate:item is not null
-                  TableScan [TS_0] (rows=5 width=88)
+                  TableScan [TS_0] (rows=2 width=88)
                     default@testdeci2,s,Tbl:COMPLETE,Col:COMPLETE,Output:["id","item"]
           <-Map 3 [SIMPLE_EDGE] llap
             SHUFFLE [RS_7]
@@ -137,6 +141,6 @@ Stage-0
                 Output:["_col1","_col2","_col3"]
                 Filter Operator [FIL_14] (rows=1 width=312)
                   predicate:((id = 2) and item is not null)
-                  TableScan [TS_3] (rows=1 width=312)
+                  TableScan [TS_3] (rows=2 width=312)
                     default@testdeci2,d,Tbl:COMPLETE,Col:COMPLETE,Output:["id","amount","sales_tax","item"]
 

http://git-wip-us.apache.org/repos/asf/hive/blob/1af98024/ql/src/test/results/clientpositive/temp_table_display_colstats_tbllvl.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/temp_table_display_colstats_tbllvl.q.out b/ql/src/test/results/clientpositive/temp_table_display_colstats_tbllvl.q.out
index 4660c8b..551ad5f 100644
--- a/ql/src/test/results/clientpositive/temp_table_display_colstats_tbllvl.q.out
+++ b/ql/src/test/results/clientpositive/temp_table_display_colstats_tbllvl.q.out
@@ -292,6 +292,7 @@ POSTHOOK: Input: default@empty_tab
 # col_name            	data_type           	min                 	max                 	num_nulls           	distinct_count      	avg_col_len         	max_col_len         	num_trues           	num_falses          	comment             
 	 	 	 	 	 	 	 	 	 	 
 a                   	int                 	                    	                    	                    	                    	                    	                    	                    	                    	from deserializer   
+COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\"}	 	 	 	 	 	 	 	 	 
 PREHOOK: query: explain
 analyze table empty_tab compute statistics for columns a,b,c,d,e
 PREHOOK: type: QUERY
@@ -360,6 +361,7 @@ POSTHOOK: Input: default@empty_tab
 # col_name            	data_type           	min                 	max                 	num_nulls           	distinct_count      	avg_col_len         	max_col_len         	num_trues           	num_falses          	comment             
 	 	 	 	 	 	 	 	 	 	 
 a                   	int                 	0                   	0                   	0                   	0                   	                    	                    	                    	                    	from deserializer   
+COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\"}	 	 	 	 	 	 	 	 	 
 PREHOOK: query: desc formatted empty_tab b
 PREHOOK: type: DESCTABLE
 PREHOOK: Input: default@empty_tab
@@ -369,6 +371,7 @@ POSTHOOK: Input: default@empty_tab
 # col_name            	data_type           	min                 	max                 	num_nulls           	distinct_count      	avg_col_len         	max_col_len         	num_trues           	num_falses          	comment             
 	 	 	 	 	 	 	 	 	 	 
 b                   	double              	0.0                 	0.0                 	0                   	0                   	                    	                    	                    	                    	from deserializer   
+COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\"}	 	 	 	 	 	 	 	 	 
 PREHOOK: query: CREATE DATABASE test
 PREHOOK: type: CREATEDATABASE
 PREHOOK: Output: database:test


[09/50] [abbrv] hive git commit: HIVE-16576: Fix encoding of intervals when fetching select query candidates from druid (Nishant Bangarwa, reviewed by Jesus Camacho Rodriguez)

Posted by we...@apache.org.
HIVE-16576: Fix encoding of intervals when fetching select query candidates from druid (Nishant Bangarwa, reviewed by Jesus Camacho Rodriguez)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/f56abb40
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/f56abb40
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/f56abb40

Branch: refs/heads/hive-14535
Commit: f56abb4054cbc4ba8c8511596117f7823d60dbe6
Parents: d769f35
Author: Nishant Bangarwa <ni...@gmail.com>
Authored: Thu May 4 09:33:35 2017 +0100
Committer: Jesus Camacho Rodriguez <jc...@apache.org>
Committed: Thu May 4 09:35:10 2017 +0100

----------------------------------------------------------------------
 .../apache/hadoop/hive/druid/io/DruidQueryBasedInputFormat.java   | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/f56abb40/druid-handler/src/java/org/apache/hadoop/hive/druid/io/DruidQueryBasedInputFormat.java
----------------------------------------------------------------------
diff --git a/druid-handler/src/java/org/apache/hadoop/hive/druid/io/DruidQueryBasedInputFormat.java b/druid-handler/src/java/org/apache/hadoop/hive/druid/io/DruidQueryBasedInputFormat.java
index fe6c901..53624e1 100644
--- a/druid-handler/src/java/org/apache/hadoop/hive/druid/io/DruidQueryBasedInputFormat.java
+++ b/druid-handler/src/java/org/apache/hadoop/hive/druid/io/DruidQueryBasedInputFormat.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.hive.druid.io;
 import java.io.IOException;
 import java.io.InputStream;
 import java.net.URL;
+import java.net.URLEncoder;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.HashMap;
@@ -213,7 +214,7 @@ public class DruidQueryBasedInputFormat extends InputFormat<NullWritable, DruidW
             StringUtils.join(query.getIntervals(), ","); // Comma-separated intervals without brackets
     final String request = String.format(
             "http://%s/druid/v2/datasources/%s/candidates?intervals=%s",
-            address, query.getDataSource().getNames().get(0), intervals);
+            address, query.getDataSource().getNames().get(0), URLEncoder.encode(intervals, "UTF-8"));
     final InputStream response;
     try {
       response = DruidStorageHandlerUtils.submitRequest(client, new Request(HttpMethod.GET, new URL(request)));
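
The one-line fix percent-encodes the comma-joined ISO-8601 interval list before it is interpolated into the candidates URL, since characters such as '/' and ':' inside the intervals are not query-string safe. A self-contained sketch of the same idea (the broker address and datasource below are placeholders):

import java.io.UnsupportedEncodingException;
import java.net.URLEncoder;

public class CandidatesUrlExample {
  public static void main(String[] args) throws UnsupportedEncodingException {
    String address = "localhost:8082";   // placeholder broker address
    String dataSource = "wikipedia";     // placeholder datasource name
    // Comma-separated ISO-8601 intervals, as produced by joining query.getIntervals()
    String intervals = "2017-01-01T00:00:00.000Z/2017-02-01T00:00:00.000Z";

    String request = String.format(
        "http://%s/druid/v2/datasources/%s/candidates?intervals=%s",
        address, dataSource, URLEncoder.encode(intervals, "UTF-8"));

    // '/' and ':' inside the interval are now percent-encoded in the query string
    System.out.println(request);
  }
}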


[43/50] [abbrv] hive git commit: HIVE-14671 : merge master into hive-14535 (Wei Zheng)

Posted by we...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/1ceaf357/metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py
----------------------------------------------------------------------
diff --cc metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py
index f547651,8ee84af..1de9056
--- a/metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py
+++ b/metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py
@@@ -14947,6 -14782,171 +15014,171 @@@ class drop_table_with_environment_conte
    def __ne__(self, other):
      return not (self == other)
  
+ class truncate_table_args:
+   """
+   Attributes:
+    - dbName
+    - tableName
+    - partNames
+   """
+ 
+   thrift_spec = (
+     None, # 0
+     (1, TType.STRING, 'dbName', None, None, ), # 1
+     (2, TType.STRING, 'tableName', None, None, ), # 2
+     (3, TType.LIST, 'partNames', (TType.STRING,None), None, ), # 3
+   )
+ 
+   def __init__(self, dbName=None, tableName=None, partNames=None,):
+     self.dbName = dbName
+     self.tableName = tableName
+     self.partNames = partNames
+ 
+   def read(self, iprot):
+     if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
+       fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
+       return
+     iprot.readStructBegin()
+     while True:
+       (fname, ftype, fid) = iprot.readFieldBegin()
+       if ftype == TType.STOP:
+         break
+       if fid == 1:
+         if ftype == TType.STRING:
+           self.dbName = iprot.readString()
+         else:
+           iprot.skip(ftype)
+       elif fid == 2:
+         if ftype == TType.STRING:
+           self.tableName = iprot.readString()
+         else:
+           iprot.skip(ftype)
+       elif fid == 3:
+         if ftype == TType.LIST:
+           self.partNames = []
 -          (_etype662, _size659) = iprot.readListBegin()
 -          for _i663 in xrange(_size659):
 -            _elem664 = iprot.readString()
 -            self.partNames.append(_elem664)
++          (_etype669, _size666) = iprot.readListBegin()
++          for _i670 in xrange(_size666):
++            _elem671 = iprot.readString()
++            self.partNames.append(_elem671)
+           iprot.readListEnd()
+         else:
+           iprot.skip(ftype)
+       else:
+         iprot.skip(ftype)
+       iprot.readFieldEnd()
+     iprot.readStructEnd()
+ 
+   def write(self, oprot):
+     if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
+       oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
+       return
+     oprot.writeStructBegin('truncate_table_args')
+     if self.dbName is not None:
+       oprot.writeFieldBegin('dbName', TType.STRING, 1)
+       oprot.writeString(self.dbName)
+       oprot.writeFieldEnd()
+     if self.tableName is not None:
+       oprot.writeFieldBegin('tableName', TType.STRING, 2)
+       oprot.writeString(self.tableName)
+       oprot.writeFieldEnd()
+     if self.partNames is not None:
+       oprot.writeFieldBegin('partNames', TType.LIST, 3)
+       oprot.writeListBegin(TType.STRING, len(self.partNames))
 -      for iter665 in self.partNames:
 -        oprot.writeString(iter665)
++      for iter672 in self.partNames:
++        oprot.writeString(iter672)
+       oprot.writeListEnd()
+       oprot.writeFieldEnd()
+     oprot.writeFieldStop()
+     oprot.writeStructEnd()
+ 
+   def validate(self):
+     return
+ 
+ 
+   def __hash__(self):
+     value = 17
+     value = (value * 31) ^ hash(self.dbName)
+     value = (value * 31) ^ hash(self.tableName)
+     value = (value * 31) ^ hash(self.partNames)
+     return value
+ 
+   def __repr__(self):
+     L = ['%s=%r' % (key, value)
+       for key, value in self.__dict__.iteritems()]
+     return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
+ 
+   def __eq__(self, other):
+     return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
+ 
+   def __ne__(self, other):
+     return not (self == other)
+ 
+ class truncate_table_result:
+   """
+   Attributes:
+    - o1
+   """
+ 
+   thrift_spec = (
+     None, # 0
+     (1, TType.STRUCT, 'o1', (MetaException, MetaException.thrift_spec), None, ), # 1
+   )
+ 
+   def __init__(self, o1=None,):
+     self.o1 = o1
+ 
+   def read(self, iprot):
+     if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
+       fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
+       return
+     iprot.readStructBegin()
+     while True:
+       (fname, ftype, fid) = iprot.readFieldBegin()
+       if ftype == TType.STOP:
+         break
+       if fid == 1:
+         if ftype == TType.STRUCT:
+           self.o1 = MetaException()
+           self.o1.read(iprot)
+         else:
+           iprot.skip(ftype)
+       else:
+         iprot.skip(ftype)
+       iprot.readFieldEnd()
+     iprot.readStructEnd()
+ 
+   def write(self, oprot):
+     if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
+       oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
+       return
+     oprot.writeStructBegin('truncate_table_result')
+     if self.o1 is not None:
+       oprot.writeFieldBegin('o1', TType.STRUCT, 1)
+       self.o1.write(oprot)
+       oprot.writeFieldEnd()
+     oprot.writeFieldStop()
+     oprot.writeStructEnd()
+ 
+   def validate(self):
+     return
+ 
+ 
+   def __hash__(self):
+     value = 17
+     value = (value * 31) ^ hash(self.o1)
+     return value
+ 
+   def __repr__(self):
+     L = ['%s=%r' % (key, value)
+       for key, value in self.__dict__.iteritems()]
+     return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
+ 
+   def __eq__(self, other):
+     return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
+ 
+   def __ne__(self, other):
+     return not (self == other)
+ 
  class get_tables_args:
    """
    Attributes:
@@@ -15053,10 -15053,10 +15285,10 @@@ class get_tables_result
        if fid == 0:
          if ftype == TType.LIST:
            self.success = []
--          (_etype669, _size666) = iprot.readListBegin()
--          for _i670 in xrange(_size666):
--            _elem671 = iprot.readString()
--            self.success.append(_elem671)
++          (_etype676, _size673) = iprot.readListBegin()
++          for _i677 in xrange(_size673):
++            _elem678 = iprot.readString()
++            self.success.append(_elem678)
            iprot.readListEnd()
          else:
            iprot.skip(ftype)
@@@ -15079,8 -15079,8 +15311,8 @@@
      if self.success is not None:
        oprot.writeFieldBegin('success', TType.LIST, 0)
        oprot.writeListBegin(TType.STRING, len(self.success))
--      for iter672 in self.success:
--        oprot.writeString(iter672)
++      for iter679 in self.success:
++        oprot.writeString(iter679)
        oprot.writeListEnd()
        oprot.writeFieldEnd()
      if self.o1 is not None:
@@@ -15230,10 -15230,10 +15462,10 @@@ class get_tables_by_type_result
        if fid == 0:
          if ftype == TType.LIST:
            self.success = []
--          (_etype676, _size673) = iprot.readListBegin()
--          for _i677 in xrange(_size673):
--            _elem678 = iprot.readString()
--            self.success.append(_elem678)
++          (_etype683, _size680) = iprot.readListBegin()
++          for _i684 in xrange(_size680):
++            _elem685 = iprot.readString()
++            self.success.append(_elem685)
            iprot.readListEnd()
          else:
            iprot.skip(ftype)
@@@ -15256,8 -15256,8 +15488,8 @@@
      if self.success is not None:
        oprot.writeFieldBegin('success', TType.LIST, 0)
        oprot.writeListBegin(TType.STRING, len(self.success))
--      for iter679 in self.success:
--        oprot.writeString(iter679)
++      for iter686 in self.success:
++        oprot.writeString(iter686)
        oprot.writeListEnd()
        oprot.writeFieldEnd()
      if self.o1 is not None:
@@@ -15330,10 -15330,10 +15562,10 @@@ class get_table_meta_args
        elif fid == 3:
          if ftype == TType.LIST:
            self.tbl_types = []
--          (_etype683, _size680) = iprot.readListBegin()
--          for _i684 in xrange(_size680):
--            _elem685 = iprot.readString()
--            self.tbl_types.append(_elem685)
++          (_etype690, _size687) = iprot.readListBegin()
++          for _i691 in xrange(_size687):
++            _elem692 = iprot.readString()
++            self.tbl_types.append(_elem692)
            iprot.readListEnd()
          else:
            iprot.skip(ftype)
@@@ -15358,8 -15358,8 +15590,8 @@@
      if self.tbl_types is not None:
        oprot.writeFieldBegin('tbl_types', TType.LIST, 3)
        oprot.writeListBegin(TType.STRING, len(self.tbl_types))
--      for iter686 in self.tbl_types:
--        oprot.writeString(iter686)
++      for iter693 in self.tbl_types:
++        oprot.writeString(iter693)
        oprot.writeListEnd()
        oprot.writeFieldEnd()
      oprot.writeFieldStop()
@@@ -15415,11 -15415,11 +15647,11 @@@ class get_table_meta_result
        if fid == 0:
          if ftype == TType.LIST:
            self.success = []
--          (_etype690, _size687) = iprot.readListBegin()
--          for _i691 in xrange(_size687):
--            _elem692 = TableMeta()
--            _elem692.read(iprot)
--            self.success.append(_elem692)
++          (_etype697, _size694) = iprot.readListBegin()
++          for _i698 in xrange(_size694):
++            _elem699 = TableMeta()
++            _elem699.read(iprot)
++            self.success.append(_elem699)
            iprot.readListEnd()
          else:
            iprot.skip(ftype)
@@@ -15442,8 -15442,8 +15674,8 @@@
      if self.success is not None:
        oprot.writeFieldBegin('success', TType.LIST, 0)
        oprot.writeListBegin(TType.STRUCT, len(self.success))
--      for iter693 in self.success:
--        iter693.write(oprot)
++      for iter700 in self.success:
++        iter700.write(oprot)
        oprot.writeListEnd()
        oprot.writeFieldEnd()
      if self.o1 is not None:
@@@ -15567,10 -15567,10 +15799,10 @@@ class get_all_tables_result
        if fid == 0:
          if ftype == TType.LIST:
            self.success = []
--          (_etype697, _size694) = iprot.readListBegin()
--          for _i698 in xrange(_size694):
--            _elem699 = iprot.readString()
--            self.success.append(_elem699)
++          (_etype704, _size701) = iprot.readListBegin()
++          for _i705 in xrange(_size701):
++            _elem706 = iprot.readString()
++            self.success.append(_elem706)
            iprot.readListEnd()
          else:
            iprot.skip(ftype)
@@@ -15593,8 -15593,8 +15825,8 @@@
      if self.success is not None:
        oprot.writeFieldBegin('success', TType.LIST, 0)
        oprot.writeListBegin(TType.STRING, len(self.success))
--      for iter700 in self.success:
--        oprot.writeString(iter700)
++      for iter707 in self.success:
++        oprot.writeString(iter707)
        oprot.writeListEnd()
        oprot.writeFieldEnd()
      if self.o1 is not None:
@@@ -15830,10 -15830,10 +16062,10 @@@ class get_table_objects_by_name_args
        elif fid == 2:
          if ftype == TType.LIST:
            self.tbl_names = []
--          (_etype704, _size701) = iprot.readListBegin()
--          for _i705 in xrange(_size701):
--            _elem706 = iprot.readString()
--            self.tbl_names.append(_elem706)
++          (_etype711, _size708) = iprot.readListBegin()
++          for _i712 in xrange(_size708):
++            _elem713 = iprot.readString()
++            self.tbl_names.append(_elem713)
            iprot.readListEnd()
          else:
            iprot.skip(ftype)
@@@ -15854,8 -15854,8 +16086,8 @@@
      if self.tbl_names is not None:
        oprot.writeFieldBegin('tbl_names', TType.LIST, 2)
        oprot.writeListBegin(TType.STRING, len(self.tbl_names))
--      for iter707 in self.tbl_names:
--        oprot.writeString(iter707)
++      for iter714 in self.tbl_names:
++        oprot.writeString(iter714)
        oprot.writeListEnd()
        oprot.writeFieldEnd()
      oprot.writeFieldStop()
@@@ -15907,11 -15907,11 +16139,11 @@@ class get_table_objects_by_name_result
        if fid == 0:
          if ftype == TType.LIST:
            self.success = []
--          (_etype711, _size708) = iprot.readListBegin()
--          for _i712 in xrange(_size708):
--            _elem713 = Table()
--            _elem713.read(iprot)
--            self.success.append(_elem713)
++          (_etype718, _size715) = iprot.readListBegin()
++          for _i719 in xrange(_size715):
++            _elem720 = Table()
++            _elem720.read(iprot)
++            self.success.append(_elem720)
            iprot.readListEnd()
          else:
            iprot.skip(ftype)
@@@ -15928,8 -15928,8 +16160,8 @@@
      if self.success is not None:
        oprot.writeFieldBegin('success', TType.LIST, 0)
        oprot.writeListBegin(TType.STRUCT, len(self.success))
--      for iter714 in self.success:
--        iter714.write(oprot)
++      for iter721 in self.success:
++        iter721.write(oprot)
        oprot.writeListEnd()
        oprot.writeFieldEnd()
      oprot.writeFieldStop()
@@@ -16412,10 -16412,10 +16644,10 @@@ class get_table_names_by_filter_result
        if fid == 0:
          if ftype == TType.LIST:
            self.success = []
--          (_etype718, _size715) = iprot.readListBegin()
--          for _i719 in xrange(_size715):
--            _elem720 = iprot.readString()
--            self.success.append(_elem720)
++          (_etype725, _size722) = iprot.readListBegin()
++          for _i726 in xrange(_size722):
++            _elem727 = iprot.readString()
++            self.success.append(_elem727)
            iprot.readListEnd()
          else:
            iprot.skip(ftype)
@@@ -16450,8 -16450,8 +16682,8 @@@
      if self.success is not None:
        oprot.writeFieldBegin('success', TType.LIST, 0)
        oprot.writeListBegin(TType.STRING, len(self.success))
--      for iter721 in self.success:
--        oprot.writeString(iter721)
++      for iter728 in self.success:
++        oprot.writeString(iter728)
        oprot.writeListEnd()
        oprot.writeFieldEnd()
      if self.o1 is not None:
@@@ -17421,11 -17421,11 +17653,11 @@@ class add_partitions_args
        if fid == 1:
          if ftype == TType.LIST:
            self.new_parts = []
--          (_etype725, _size722) = iprot.readListBegin()
--          for _i726 in xrange(_size722):
--            _elem727 = Partition()
--            _elem727.read(iprot)
--            self.new_parts.append(_elem727)
++          (_etype732, _size729) = iprot.readListBegin()
++          for _i733 in xrange(_size729):
++            _elem734 = Partition()
++            _elem734.read(iprot)
++            self.new_parts.append(_elem734)
            iprot.readListEnd()
          else:
            iprot.skip(ftype)
@@@ -17442,8 -17442,8 +17674,8 @@@
      if self.new_parts is not None:
        oprot.writeFieldBegin('new_parts', TType.LIST, 1)
        oprot.writeListBegin(TType.STRUCT, len(self.new_parts))
--      for iter728 in self.new_parts:
--        iter728.write(oprot)
++      for iter735 in self.new_parts:
++        iter735.write(oprot)
        oprot.writeListEnd()
        oprot.writeFieldEnd()
      oprot.writeFieldStop()
@@@ -17601,11 -17601,11 +17833,11 @@@ class add_partitions_pspec_args
        if fid == 1:
          if ftype == TType.LIST:
            self.new_parts = []
--          (_etype732, _size729) = iprot.readListBegin()
--          for _i733 in xrange(_size729):
--            _elem734 = PartitionSpec()
--            _elem734.read(iprot)
--            self.new_parts.append(_elem734)
++          (_etype739, _size736) = iprot.readListBegin()
++          for _i740 in xrange(_size736):
++            _elem741 = PartitionSpec()
++            _elem741.read(iprot)
++            self.new_parts.append(_elem741)
            iprot.readListEnd()
          else:
            iprot.skip(ftype)
@@@ -17622,8 -17622,8 +17854,8 @@@
      if self.new_parts is not None:
        oprot.writeFieldBegin('new_parts', TType.LIST, 1)
        oprot.writeListBegin(TType.STRUCT, len(self.new_parts))
--      for iter735 in self.new_parts:
--        iter735.write(oprot)
++      for iter742 in self.new_parts:
++        iter742.write(oprot)
        oprot.writeListEnd()
        oprot.writeFieldEnd()
      oprot.writeFieldStop()
@@@ -17797,10 -17797,10 +18029,10 @@@ class append_partition_args
        elif fid == 3:
          if ftype == TType.LIST:
            self.part_vals = []
--          (_etype739, _size736) = iprot.readListBegin()
--          for _i740 in xrange(_size736):
--            _elem741 = iprot.readString()
--            self.part_vals.append(_elem741)
++          (_etype746, _size743) = iprot.readListBegin()
++          for _i747 in xrange(_size743):
++            _elem748 = iprot.readString()
++            self.part_vals.append(_elem748)
            iprot.readListEnd()
          else:
            iprot.skip(ftype)
@@@ -17825,8 -17825,8 +18057,8 @@@
      if self.part_vals is not None:
        oprot.writeFieldBegin('part_vals', TType.LIST, 3)
        oprot.writeListBegin(TType.STRING, len(self.part_vals))
--      for iter742 in self.part_vals:
--        oprot.writeString(iter742)
++      for iter749 in self.part_vals:
++        oprot.writeString(iter749)
        oprot.writeListEnd()
        oprot.writeFieldEnd()
      oprot.writeFieldStop()
@@@ -18179,10 -18179,10 +18411,10 @@@ class append_partition_with_environment
        elif fid == 3:
          if ftype == TType.LIST:
            self.part_vals = []
--          (_etype746, _size743) = iprot.readListBegin()
--          for _i747 in xrange(_size743):
--            _elem748 = iprot.readString()
--            self.part_vals.append(_elem748)
++          (_etype753, _size750) = iprot.readListBegin()
++          for _i754 in xrange(_size750):
++            _elem755 = iprot.readString()
++            self.part_vals.append(_elem755)
            iprot.readListEnd()
          else:
            iprot.skip(ftype)
@@@ -18213,8 -18213,8 +18445,8 @@@
      if self.part_vals is not None:
        oprot.writeFieldBegin('part_vals', TType.LIST, 3)
        oprot.writeListBegin(TType.STRING, len(self.part_vals))
--      for iter749 in self.part_vals:
--        oprot.writeString(iter749)
++      for iter756 in self.part_vals:
++        oprot.writeString(iter756)
        oprot.writeListEnd()
        oprot.writeFieldEnd()
      if self.environment_context is not None:
@@@ -18809,10 -18809,10 +19041,10 @@@ class drop_partition_args
        elif fid == 3:
          if ftype == TType.LIST:
            self.part_vals = []
--          (_etype753, _size750) = iprot.readListBegin()
--          for _i754 in xrange(_size750):
--            _elem755 = iprot.readString()
--            self.part_vals.append(_elem755)
++          (_etype760, _size757) = iprot.readListBegin()
++          for _i761 in xrange(_size757):
++            _elem762 = iprot.readString()
++            self.part_vals.append(_elem762)
            iprot.readListEnd()
          else:
            iprot.skip(ftype)
@@@ -18842,8 -18842,8 +19074,8 @@@
      if self.part_vals is not None:
        oprot.writeFieldBegin('part_vals', TType.LIST, 3)
        oprot.writeListBegin(TType.STRING, len(self.part_vals))
--      for iter756 in self.part_vals:
--        oprot.writeString(iter756)
++      for iter763 in self.part_vals:
++        oprot.writeString(iter763)
        oprot.writeListEnd()
        oprot.writeFieldEnd()
      if self.deleteData is not None:
@@@ -19016,10 -19016,10 +19248,10 @@@ class drop_partition_with_environment_c
        elif fid == 3:
          if ftype == TType.LIST:
            self.part_vals = []
--          (_etype760, _size757) = iprot.readListBegin()
--          for _i761 in xrange(_size757):
--            _elem762 = iprot.readString()
--            self.part_vals.append(_elem762)
++          (_etype767, _size764) = iprot.readListBegin()
++          for _i768 in xrange(_size764):
++            _elem769 = iprot.readString()
++            self.part_vals.append(_elem769)
            iprot.readListEnd()
          else:
            iprot.skip(ftype)
@@@ -19055,8 -19055,8 +19287,8 @@@
      if self.part_vals is not None:
        oprot.writeFieldBegin('part_vals', TType.LIST, 3)
        oprot.writeListBegin(TType.STRING, len(self.part_vals))
--      for iter763 in self.part_vals:
--        oprot.writeString(iter763)
++      for iter770 in self.part_vals:
++        oprot.writeString(iter770)
        oprot.writeListEnd()
        oprot.writeFieldEnd()
      if self.deleteData is not None:
@@@ -19793,10 -19793,10 +20025,10 @@@ class get_partition_args
        elif fid == 3:
          if ftype == TType.LIST:
            self.part_vals = []
--          (_etype767, _size764) = iprot.readListBegin()
--          for _i768 in xrange(_size764):
--            _elem769 = iprot.readString()
--            self.part_vals.append(_elem769)
++          (_etype774, _size771) = iprot.readListBegin()
++          for _i775 in xrange(_size771):
++            _elem776 = iprot.readString()
++            self.part_vals.append(_elem776)
            iprot.readListEnd()
          else:
            iprot.skip(ftype)
@@@ -19821,8 -19821,8 +20053,8 @@@
      if self.part_vals is not None:
        oprot.writeFieldBegin('part_vals', TType.LIST, 3)
        oprot.writeListBegin(TType.STRING, len(self.part_vals))
--      for iter770 in self.part_vals:
--        oprot.writeString(iter770)
++      for iter777 in self.part_vals:
++        oprot.writeString(iter777)
        oprot.writeListEnd()
        oprot.writeFieldEnd()
      oprot.writeFieldStop()
@@@ -19981,11 -19981,11 +20213,11 @@@ class exchange_partition_args
        if fid == 1:
          if ftype == TType.MAP:
            self.partitionSpecs = {}
--          (_ktype772, _vtype773, _size771 ) = iprot.readMapBegin()
--          for _i775 in xrange(_size771):
--            _key776 = iprot.readString()
--            _val777 = iprot.readString()
--            self.partitionSpecs[_key776] = _val777
++          (_ktype779, _vtype780, _size778 ) = iprot.readMapBegin()
++          for _i782 in xrange(_size778):
++            _key783 = iprot.readString()
++            _val784 = iprot.readString()
++            self.partitionSpecs[_key783] = _val784
            iprot.readMapEnd()
          else:
            iprot.skip(ftype)
@@@ -20022,9 -20022,9 +20254,9 @@@
      if self.partitionSpecs is not None:
        oprot.writeFieldBegin('partitionSpecs', TType.MAP, 1)
        oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.partitionSpecs))
--      for kiter778,viter779 in self.partitionSpecs.items():
--        oprot.writeString(kiter778)
--        oprot.writeString(viter779)
++      for kiter785,viter786 in self.partitionSpecs.items():
++        oprot.writeString(kiter785)
++        oprot.writeString(viter786)
        oprot.writeMapEnd()
        oprot.writeFieldEnd()
      if self.source_db is not None:
@@@ -20229,11 -20229,11 +20461,11 @@@ class exchange_partitions_args
        if fid == 1:
          if ftype == TType.MAP:
            self.partitionSpecs = {}
--          (_ktype781, _vtype782, _size780 ) = iprot.readMapBegin()
--          for _i784 in xrange(_size780):
--            _key785 = iprot.readString()
--            _val786 = iprot.readString()
--            self.partitionSpecs[_key785] = _val786
++          (_ktype788, _vtype789, _size787 ) = iprot.readMapBegin()
++          for _i791 in xrange(_size787):
++            _key792 = iprot.readString()
++            _val793 = iprot.readString()
++            self.partitionSpecs[_key792] = _val793
            iprot.readMapEnd()
          else:
            iprot.skip(ftype)
@@@ -20270,9 -20270,9 +20502,9 @@@
      if self.partitionSpecs is not None:
        oprot.writeFieldBegin('partitionSpecs', TType.MAP, 1)
        oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.partitionSpecs))
--      for kiter787,viter788 in self.partitionSpecs.items():
--        oprot.writeString(kiter787)
--        oprot.writeString(viter788)
++      for kiter794,viter795 in self.partitionSpecs.items():
++        oprot.writeString(kiter794)
++        oprot.writeString(viter795)
        oprot.writeMapEnd()
        oprot.writeFieldEnd()
      if self.source_db is not None:
@@@ -20355,11 -20355,11 +20587,11 @@@ class exchange_partitions_result
        if fid == 0:
          if ftype == TType.LIST:
            self.success = []
--          (_etype792, _size789) = iprot.readListBegin()
--          for _i793 in xrange(_size789):
--            _elem794 = Partition()
--            _elem794.read(iprot)
--            self.success.append(_elem794)
++          (_etype799, _size796) = iprot.readListBegin()
++          for _i800 in xrange(_size796):
++            _elem801 = Partition()
++            _elem801.read(iprot)
++            self.success.append(_elem801)
            iprot.readListEnd()
          else:
            iprot.skip(ftype)
@@@ -20400,8 -20400,8 +20632,8 @@@
      if self.success is not None:
        oprot.writeFieldBegin('success', TType.LIST, 0)
        oprot.writeListBegin(TType.STRUCT, len(self.success))
--      for iter795 in self.success:
--        iter795.write(oprot)
++      for iter802 in self.success:
++        iter802.write(oprot)
        oprot.writeListEnd()
        oprot.writeFieldEnd()
      if self.o1 is not None:
@@@ -20495,10 -20495,10 +20727,10 @@@ class get_partition_with_auth_args
        elif fid == 3:
          if ftype == TType.LIST:
            self.part_vals = []
--          (_etype799, _size796) = iprot.readListBegin()
--          for _i800 in xrange(_size796):
--            _elem801 = iprot.readString()
--            self.part_vals.append(_elem801)
++          (_etype806, _size803) = iprot.readListBegin()
++          for _i807 in xrange(_size803):
++            _elem808 = iprot.readString()
++            self.part_vals.append(_elem808)
            iprot.readListEnd()
          else:
            iprot.skip(ftype)
@@@ -20510,10 -20510,10 +20742,10 @@@
        elif fid == 5:
          if ftype == TType.LIST:
            self.group_names = []
--          (_etype805, _size802) = iprot.readListBegin()
--          for _i806 in xrange(_size802):
--            _elem807 = iprot.readString()
--            self.group_names.append(_elem807)
++          (_etype812, _size809) = iprot.readListBegin()
++          for _i813 in xrange(_size809):
++            _elem814 = iprot.readString()
++            self.group_names.append(_elem814)
            iprot.readListEnd()
          else:
            iprot.skip(ftype)
@@@ -20538,8 -20538,8 +20770,8 @@@
      if self.part_vals is not None:
        oprot.writeFieldBegin('part_vals', TType.LIST, 3)
        oprot.writeListBegin(TType.STRING, len(self.part_vals))
--      for iter808 in self.part_vals:
--        oprot.writeString(iter808)
++      for iter815 in self.part_vals:
++        oprot.writeString(iter815)
        oprot.writeListEnd()
        oprot.writeFieldEnd()
      if self.user_name is not None:
@@@ -20549,8 -20549,8 +20781,8 @@@
      if self.group_names is not None:
        oprot.writeFieldBegin('group_names', TType.LIST, 5)
        oprot.writeListBegin(TType.STRING, len(self.group_names))
--      for iter809 in self.group_names:
--        oprot.writeString(iter809)
++      for iter816 in self.group_names:
++        oprot.writeString(iter816)
        oprot.writeListEnd()
        oprot.writeFieldEnd()
      oprot.writeFieldStop()
@@@ -20979,11 -20979,11 +21211,11 @@@ class get_partitions_result
        if fid == 0:
          if ftype == TType.LIST:
            self.success = []
--          (_etype813, _size810) = iprot.readListBegin()
--          for _i814 in xrange(_size810):
--            _elem815 = Partition()
--            _elem815.read(iprot)
--            self.success.append(_elem815)
++          (_etype820, _size817) = iprot.readListBegin()
++          for _i821 in xrange(_size817):
++            _elem822 = Partition()
++            _elem822.read(iprot)
++            self.success.append(_elem822)
            iprot.readListEnd()
          else:
            iprot.skip(ftype)
@@@ -21012,8 -21012,8 +21244,8 @@@
      if self.success is not None:
        oprot.writeFieldBegin('success', TType.LIST, 0)
        oprot.writeListBegin(TType.STRUCT, len(self.success))
--      for iter816 in self.success:
--        iter816.write(oprot)
++      for iter823 in self.success:
++        iter823.write(oprot)
        oprot.writeListEnd()
        oprot.writeFieldEnd()
      if self.o1 is not None:
@@@ -21107,10 -21107,10 +21339,10 @@@ class get_partitions_with_auth_args
        elif fid == 5:
          if ftype == TType.LIST:
            self.group_names = []
--          (_etype820, _size817) = iprot.readListBegin()
--          for _i821 in xrange(_size817):
--            _elem822 = iprot.readString()
--            self.group_names.append(_elem822)
++          (_etype827, _size824) = iprot.readListBegin()
++          for _i828 in xrange(_size824):
++            _elem829 = iprot.readString()
++            self.group_names.append(_elem829)
            iprot.readListEnd()
          else:
            iprot.skip(ftype)
@@@ -21143,8 -21143,8 +21375,8 @@@
      if self.group_names is not None:
        oprot.writeFieldBegin('group_names', TType.LIST, 5)
        oprot.writeListBegin(TType.STRING, len(self.group_names))
--      for iter823 in self.group_names:
--        oprot.writeString(iter823)
++      for iter830 in self.group_names:
++        oprot.writeString(iter830)
        oprot.writeListEnd()
        oprot.writeFieldEnd()
      oprot.writeFieldStop()
@@@ -21205,11 -21205,11 +21437,11 @@@ class get_partitions_with_auth_result
        if fid == 0:
          if ftype == TType.LIST:
            self.success = []
--          (_etype827, _size824) = iprot.readListBegin()
--          for _i828 in xrange(_size824):
--            _elem829 = Partition()
--            _elem829.read(iprot)
--            self.success.append(_elem829)
++          (_etype834, _size831) = iprot.readListBegin()
++          for _i835 in xrange(_size831):
++            _elem836 = Partition()
++            _elem836.read(iprot)
++            self.success.append(_elem836)
            iprot.readListEnd()
          else:
            iprot.skip(ftype)
@@@ -21238,8 -21238,8 +21470,8 @@@
      if self.success is not None:
        oprot.writeFieldBegin('success', TType.LIST, 0)
        oprot.writeListBegin(TType.STRUCT, len(self.success))
--      for iter830 in self.success:
--        iter830.write(oprot)
++      for iter837 in self.success:
++        iter837.write(oprot)
        oprot.writeListEnd()
        oprot.writeFieldEnd()
      if self.o1 is not None:
@@@ -21397,11 -21397,11 +21629,11 @@@ class get_partitions_pspec_result
        if fid == 0:
          if ftype == TType.LIST:
            self.success = []
--          (_etype834, _size831) = iprot.readListBegin()
--          for _i835 in xrange(_size831):
--            _elem836 = PartitionSpec()
--            _elem836.read(iprot)
--            self.success.append(_elem836)
++          (_etype841, _size838) = iprot.readListBegin()
++          for _i842 in xrange(_size838):
++            _elem843 = PartitionSpec()
++            _elem843.read(iprot)
++            self.success.append(_elem843)
            iprot.readListEnd()
          else:
            iprot.skip(ftype)
@@@ -21430,8 -21430,8 +21662,8 @@@
      if self.success is not None:
        oprot.writeFieldBegin('success', TType.LIST, 0)
        oprot.writeListBegin(TType.STRUCT, len(self.success))
--      for iter837 in self.success:
--        iter837.write(oprot)
++      for iter844 in self.success:
++        iter844.write(oprot)
        oprot.writeListEnd()
        oprot.writeFieldEnd()
      if self.o1 is not None:
@@@ -21586,10 -21586,10 +21818,10 @@@ class get_partition_names_result
        if fid == 0:
          if ftype == TType.LIST:
            self.success = []
--          (_etype841, _size838) = iprot.readListBegin()
--          for _i842 in xrange(_size838):
--            _elem843 = iprot.readString()
--            self.success.append(_elem843)
++          (_etype848, _size845) = iprot.readListBegin()
++          for _i849 in xrange(_size845):
++            _elem850 = iprot.readString()
++            self.success.append(_elem850)
            iprot.readListEnd()
          else:
            iprot.skip(ftype)
@@@ -21612,8 -21612,8 +21844,8 @@@
      if self.success is not None:
        oprot.writeFieldBegin('success', TType.LIST, 0)
        oprot.writeListBegin(TType.STRING, len(self.success))
--      for iter844 in self.success:
--        oprot.writeString(iter844)
++      for iter851 in self.success:
++        oprot.writeString(iter851)
        oprot.writeListEnd()
        oprot.writeFieldEnd()
      if self.o2 is not None:
@@@ -21689,10 -21689,10 +21921,10 @@@ class get_partitions_ps_args
        elif fid == 3:
          if ftype == TType.LIST:
            self.part_vals = []
--          (_etype848, _size845) = iprot.readListBegin()
--          for _i849 in xrange(_size845):
--            _elem850 = iprot.readString()
--            self.part_vals.append(_elem850)
++          (_etype855, _size852) = iprot.readListBegin()
++          for _i856 in xrange(_size852):
++            _elem857 = iprot.readString()
++            self.part_vals.append(_elem857)
            iprot.readListEnd()
          else:
            iprot.skip(ftype)
@@@ -21722,8 -21722,8 +21954,8 @@@
      if self.part_vals is not None:
        oprot.writeFieldBegin('part_vals', TType.LIST, 3)
        oprot.writeListBegin(TType.STRING, len(self.part_vals))
--      for iter851 in self.part_vals:
--        oprot.writeString(iter851)
++      for iter858 in self.part_vals:
++        oprot.writeString(iter858)
        oprot.writeListEnd()
        oprot.writeFieldEnd()
      if self.max_parts is not None:
@@@ -21787,11 -21787,11 +22019,11 @@@ class get_partitions_ps_result
        if fid == 0:
          if ftype == TType.LIST:
            self.success = []
--          (_etype855, _size852) = iprot.readListBegin()
--          for _i856 in xrange(_size852):
--            _elem857 = Partition()
--            _elem857.read(iprot)
--            self.success.append(_elem857)
++          (_etype862, _size859) = iprot.readListBegin()
++          for _i863 in xrange(_size859):
++            _elem864 = Partition()
++            _elem864.read(iprot)
++            self.success.append(_elem864)
            iprot.readListEnd()
          else:
            iprot.skip(ftype)
@@@ -21820,8 -21820,8 +22052,8 @@@
      if self.success is not None:
        oprot.writeFieldBegin('success', TType.LIST, 0)
        oprot.writeListBegin(TType.STRUCT, len(self.success))
--      for iter858 in self.success:
--        iter858.write(oprot)
++      for iter865 in self.success:
++        iter865.write(oprot)
        oprot.writeListEnd()
        oprot.writeFieldEnd()
      if self.o1 is not None:
@@@ -21908,10 -21908,10 +22140,10 @@@ class get_partitions_ps_with_auth_args
        elif fid == 3:
          if ftype == TType.LIST:
            self.part_vals = []
--          (_etype862, _size859) = iprot.readListBegin()
--          for _i863 in xrange(_size859):
--            _elem864 = iprot.readString()
--            self.part_vals.append(_elem864)
++          (_etype869, _size866) = iprot.readListBegin()
++          for _i870 in xrange(_size866):
++            _elem871 = iprot.readString()
++            self.part_vals.append(_elem871)
            iprot.readListEnd()
          else:
            iprot.skip(ftype)
@@@ -21928,10 -21928,10 +22160,10 @@@
        elif fid == 6:
          if ftype == TType.LIST:
            self.group_names = []
--          (_etype868, _size865) = iprot.readListBegin()
--          for _i869 in xrange(_size865):
--            _elem870 = iprot.readString()
--            self.group_names.append(_elem870)
++          (_etype875, _size872) = iprot.readListBegin()
++          for _i876 in xrange(_size872):
++            _elem877 = iprot.readString()
++            self.group_names.append(_elem877)
            iprot.readListEnd()
          else:
            iprot.skip(ftype)
@@@ -21956,8 -21956,8 +22188,8 @@@
      if self.part_vals is not None:
        oprot.writeFieldBegin('part_vals', TType.LIST, 3)
        oprot.writeListBegin(TType.STRING, len(self.part_vals))
--      for iter871 in self.part_vals:
--        oprot.writeString(iter871)
++      for iter878 in self.part_vals:
++        oprot.writeString(iter878)
        oprot.writeListEnd()
        oprot.writeFieldEnd()
      if self.max_parts is not None:
@@@ -21971,8 -21971,8 +22203,8 @@@
      if self.group_names is not None:
        oprot.writeFieldBegin('group_names', TType.LIST, 6)
        oprot.writeListBegin(TType.STRING, len(self.group_names))
--      for iter872 in self.group_names:
--        oprot.writeString(iter872)
++      for iter879 in self.group_names:
++        oprot.writeString(iter879)
        oprot.writeListEnd()
        oprot.writeFieldEnd()
      oprot.writeFieldStop()
@@@ -22034,11 -22034,11 +22266,11 @@@ class get_partitions_ps_with_auth_resul
        if fid == 0:
          if ftype == TType.LIST:
            self.success = []
--          (_etype876, _size873) = iprot.readListBegin()
--          for _i877 in xrange(_size873):
--            _elem878 = Partition()
--            _elem878.read(iprot)
--            self.success.append(_elem878)
++          (_etype883, _size880) = iprot.readListBegin()
++          for _i884 in xrange(_size880):
++            _elem885 = Partition()
++            _elem885.read(iprot)
++            self.success.append(_elem885)
            iprot.readListEnd()
          else:
            iprot.skip(ftype)
@@@ -22067,8 -22067,8 +22299,8 @@@
      if self.success is not None:
        oprot.writeFieldBegin('success', TType.LIST, 0)
        oprot.writeListBegin(TType.STRUCT, len(self.success))
--      for iter879 in self.success:
--        iter879.write(oprot)
++      for iter886 in self.success:
++        iter886.write(oprot)
        oprot.writeListEnd()
        oprot.writeFieldEnd()
      if self.o1 is not None:
@@@ -22149,10 -22149,10 +22381,10 @@@ class get_partition_names_ps_args
        elif fid == 3:
          if ftype == TType.LIST:
            self.part_vals = []
--          (_etype883, _size880) = iprot.readListBegin()
--          for _i884 in xrange(_size880):
--            _elem885 = iprot.readString()
--            self.part_vals.append(_elem885)
++          (_etype890, _size887) = iprot.readListBegin()
++          for _i891 in xrange(_size887):
++            _elem892 = iprot.readString()
++            self.part_vals.append(_elem892)
            iprot.readListEnd()
          else:
            iprot.skip(ftype)
@@@ -22182,8 -22182,8 +22414,8 @@@
      if self.part_vals is not None:
        oprot.writeFieldBegin('part_vals', TType.LIST, 3)
        oprot.writeListBegin(TType.STRING, len(self.part_vals))
--      for iter886 in self.part_vals:
--        oprot.writeString(iter886)
++      for iter893 in self.part_vals:
++        oprot.writeString(iter893)
        oprot.writeListEnd()
        oprot.writeFieldEnd()
      if self.max_parts is not None:
@@@ -22247,10 -22247,10 +22479,10 @@@ class get_partition_names_ps_result
        if fid == 0:
          if ftype == TType.LIST:
            self.success = []
--          (_etype890, _size887) = iprot.readListBegin()
--          for _i891 in xrange(_size887):
--            _elem892 = iprot.readString()
--            self.success.append(_elem892)
++          (_etype897, _size894) = iprot.readListBegin()
++          for _i898 in xrange(_size894):
++            _elem899 = iprot.readString()
++            self.success.append(_elem899)
            iprot.readListEnd()
          else:
            iprot.skip(ftype)
@@@ -22279,8 -22279,8 +22511,8 @@@
      if self.success is not None:
        oprot.writeFieldBegin('success', TType.LIST, 0)
        oprot.writeListBegin(TType.STRING, len(self.success))
--      for iter893 in self.success:
--        oprot.writeString(iter893)
++      for iter900 in self.success:
++        oprot.writeString(iter900)
        oprot.writeListEnd()
        oprot.writeFieldEnd()
      if self.o1 is not None:
@@@ -22451,11 -22451,11 +22683,11 @@@ class get_partitions_by_filter_result
        if fid == 0:
          if ftype == TType.LIST:
            self.success = []
--          (_etype897, _size894) = iprot.readListBegin()
--          for _i898 in xrange(_size894):
--            _elem899 = Partition()
--            _elem899.read(iprot)
--            self.success.append(_elem899)
++          (_etype904, _size901) = iprot.readListBegin()
++          for _i905 in xrange(_size901):
++            _elem906 = Partition()
++            _elem906.read(iprot)
++            self.success.append(_elem906)
            iprot.readListEnd()
          else:
            iprot.skip(ftype)
@@@ -22484,8 -22484,8 +22716,8 @@@
      if self.success is not None:
        oprot.writeFieldBegin('success', TType.LIST, 0)
        oprot.writeListBegin(TType.STRUCT, len(self.success))
--      for iter900 in self.success:
--        iter900.write(oprot)
++      for iter907 in self.success:
++        iter907.write(oprot)
        oprot.writeListEnd()
        oprot.writeFieldEnd()
      if self.o1 is not None:
@@@ -22656,11 -22656,11 +22888,11 @@@ class get_part_specs_by_filter_result
        if fid == 0:
          if ftype == TType.LIST:
            self.success = []
--          (_etype904, _size901) = iprot.readListBegin()
--          for _i905 in xrange(_size901):
--            _elem906 = PartitionSpec()
--            _elem906.read(iprot)
--            self.success.append(_elem906)
++          (_etype911, _size908) = iprot.readListBegin()
++          for _i912 in xrange(_size908):
++            _elem913 = PartitionSpec()
++            _elem913.read(iprot)
++            self.success.append(_elem913)
            iprot.readListEnd()
          else:
            iprot.skip(ftype)
@@@ -22689,8 -22689,8 +22921,8 @@@
      if self.success is not None:
        oprot.writeFieldBegin('success', TType.LIST, 0)
        oprot.writeListBegin(TType.STRUCT, len(self.success))
--      for iter907 in self.success:
--        iter907.write(oprot)
++      for iter914 in self.success:
++        iter914.write(oprot)
        oprot.writeListEnd()
        oprot.writeFieldEnd()
      if self.o1 is not None:
@@@ -23110,10 -23110,10 +23342,10 @@@ class get_partitions_by_names_args
        elif fid == 3:
          if ftype == TType.LIST:
            self.names = []
--          (_etype911, _size908) = iprot.readListBegin()
--          for _i912 in xrange(_size908):
--            _elem913 = iprot.readString()
--            self.names.append(_elem913)
++          (_etype918, _size915) = iprot.readListBegin()
++          for _i919 in xrange(_size915):
++            _elem920 = iprot.readString()
++            self.names.append(_elem920)
            iprot.readListEnd()
          else:
            iprot.skip(ftype)
@@@ -23138,8 -23138,8 +23370,8 @@@
      if self.names is not None:
        oprot.writeFieldBegin('names', TType.LIST, 3)
        oprot.writeListBegin(TType.STRING, len(self.names))
--      for iter914 in self.names:
--        oprot.writeString(iter914)
++      for iter921 in self.names:
++        oprot.writeString(iter921)
        oprot.writeListEnd()
        oprot.writeFieldEnd()
      oprot.writeFieldStop()
@@@ -23198,11 -23198,11 +23430,11 @@@ class get_partitions_by_names_result
        if fid == 0:
          if ftype == TType.LIST:
            self.success = []
--          (_etype918, _size915) = iprot.readListBegin()
--          for _i919 in xrange(_size915):
--            _elem920 = Partition()
--            _elem920.read(iprot)
--            self.success.append(_elem920)
++          (_etype925, _size922) = iprot.readListBegin()
++          for _i926 in xrange(_size922):
++            _elem927 = Partition()
++            _elem927.read(iprot)
++            self.success.append(_elem927)
            iprot.readListEnd()
          else:
            iprot.skip(ftype)
@@@ -23231,8 -23231,8 +23463,8 @@@
      if self.success is not None:
        oprot.writeFieldBegin('success', TType.LIST, 0)
        oprot.writeListBegin(TType.STRUCT, len(self.success))
--      for iter921 in self.success:
--        iter921.write(oprot)
++      for iter928 in self.success:
++        iter928.write(oprot)
        oprot.writeListEnd()
        oprot.writeFieldEnd()
      if self.o1 is not None:
@@@ -23482,11 -23482,11 +23714,11 @@@ class alter_partitions_args
        elif fid == 3:
          if ftype == TType.LIST:
            self.new_parts = []
--          (_etype925, _size922) = iprot.readListBegin()
--          for _i926 in xrange(_size922):
--            _elem927 = Partition()
--            _elem927.read(iprot)
--            self.new_parts.append(_elem927)
++          (_etype932, _size929) = iprot.readListBegin()
++          for _i933 in xrange(_size929):
++            _elem934 = Partition()
++            _elem934.read(iprot)
++            self.new_parts.append(_elem934)
            iprot.readListEnd()
          else:
            iprot.skip(ftype)
@@@ -23511,8 -23511,8 +23743,8 @@@
      if self.new_parts is not None:
        oprot.writeFieldBegin('new_parts', TType.LIST, 3)
        oprot.writeListBegin(TType.STRUCT, len(self.new_parts))
--      for iter928 in self.new_parts:
--        iter928.write(oprot)
++      for iter935 in self.new_parts:
++        iter935.write(oprot)
        oprot.writeListEnd()
        oprot.writeFieldEnd()
      oprot.writeFieldStop()
@@@ -23665,11 -23665,11 +23897,11 @@@ class alter_partitions_with_environment
        elif fid == 3:
          if ftype == TType.LIST:
            self.new_parts = []
--          (_etype932, _size929) = iprot.readListBegin()
--          for _i933 in xrange(_size929):
--            _elem934 = Partition()
--            _elem934.read(iprot)
--            self.new_parts.append(_elem934)
++          (_etype939, _size936) = iprot.readListBegin()
++          for _i940 in xrange(_size936):
++            _elem941 = Partition()
++            _elem941.read(iprot)
++            self.new_parts.append(_elem941)
            iprot.readListEnd()
          else:
            iprot.skip(ftype)
@@@ -23700,8 -23700,8 +23932,8 @@@
      if self.new_parts is not None:
        oprot.writeFieldBegin('new_parts', TType.LIST, 3)
        oprot.writeListBegin(TType.STRUCT, len(self.new_parts))
--      for iter935 in self.new_parts:
--        iter935.write(oprot)
++      for iter942 in self.new_parts:
++        iter942.write(oprot)
        oprot.writeListEnd()
        oprot.writeFieldEnd()
      if self.environment_context is not None:
@@@ -24045,10 -24045,10 +24277,10 @@@ class rename_partition_args
        elif fid == 3:
          if ftype == TType.LIST:
            self.part_vals = []
--          (_etype939, _size936) = iprot.readListBegin()
--          for _i940 in xrange(_size936):
--            _elem941 = iprot.readString()
--            self.part_vals.append(_elem941)
++          (_etype946, _size943) = iprot.readListBegin()
++          for _i947 in xrange(_size943):
++            _elem948 = iprot.readString()
++            self.part_vals.append(_elem948)
            iprot.readListEnd()
          else:
            iprot.skip(ftype)
@@@ -24079,8 -24079,8 +24311,8 @@@
      if self.part_vals is not None:
        oprot.writeFieldBegin('part_vals', TType.LIST, 3)
        oprot.writeListBegin(TType.STRING, len(self.part_vals))
--      for iter942 in self.part_vals:
--        oprot.writeString(iter942)
++      for iter949 in self.part_vals:
++        oprot.writeString(iter949)
        oprot.writeListEnd()
        oprot.writeFieldEnd()
      if self.new_part is not None:
@@@ -24222,10 -24222,10 +24454,10 @@@ class partition_name_has_valid_characte
        if fid == 1:
          if ftype == TType.LIST:
            self.part_vals = []
--          (_etype946, _size943) = iprot.readListBegin()
--          for _i947 in xrange(_size943):
--            _elem948 = iprot.readString()
--            self.part_vals.append(_elem948)
++          (_etype953, _size950) = iprot.readListBegin()
++          for _i954 in xrange(_size950):
++            _elem955 = iprot.readString()
++            self.part_vals.append(_elem955)
            iprot.readListEnd()
          else:
            iprot.skip(ftype)
@@@ -24247,8 -24247,8 +24479,8 @@@
      if self.part_vals is not None:
        oprot.writeFieldBegin('part_vals', TType.LIST, 1)
        oprot.writeListBegin(TType.STRING, len(self.part_vals))
--      for iter949 in self.part_vals:
--        oprot.writeString(iter949)
++      for iter956 in self.part_vals:
++        oprot.writeString(iter956)
        oprot.writeListEnd()
        oprot.writeFieldEnd()
      if self.throw_exception is not None:
@@@ -24606,10 -24606,10 +24838,10 @@@ class partition_name_to_vals_result
        if fid == 0:
          if ftype == TType.LIST:
            self.success = []
--          (_etype953, _size950) = iprot.readListBegin()
--          for _i954 in xrange(_size950):
--            _elem955 = iprot.readString()
--            self.success.append(_elem955)
++          (_etype960, _size957) = iprot.readListBegin()
++          for _i961 in xrange(_size957):
++            _elem962 = iprot.readString()
++            self.success.append(_elem962)
            iprot.readListEnd()
          else:
            iprot.skip(ftype)
@@@ -24632,8 -24632,8 +24864,8 @@@
      if self.success is not None:
        oprot.writeFieldBegin('success', TType.LIST, 0)
        oprot.writeListBegin(TType.STRING, len(self.success))
--      for iter956 in self.success:
--        oprot.writeString(iter956)
++      for iter963 in self.success:
++        oprot.writeString(iter963)
        oprot.writeListEnd()
        oprot.writeFieldEnd()
      if self.o1 is not None:
@@@ -24757,11 -24757,11 +24989,11 @@@ class partition_name_to_spec_result
        if fid == 0:
          if ftype == TType.MAP:
            self.success = {}
--          (_ktype958, _vtype959, _size957 ) = iprot.readMapBegin()
--          for _i961 in xrange(_size957):
--            _key962 = iprot.readString()
--            _val963 = iprot.readString()
--            self.success[_key962] = _val963
++          (_ktype965, _vtype966, _size964 ) = iprot.readMapBegin()
++          for _i968 in xrange(_size964):
++            _key969 = iprot.readString()
++            _val970 = iprot.readString()
++            self.success[_key969] = _val970
            iprot.readMapEnd()
          else:
            iprot.skip(ftype)
@@@ -24784,9 -24784,9 +25016,9 @@@
      if self.success is not None:
        oprot.writeFieldBegin('success', TType.MAP, 0)
        oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.success))
--      for kiter964,viter965 in self.success.items():
--        oprot.writeString(kiter964)
--        oprot.writeString(viter965)
++      for kiter971,viter972 in self.success.items():
++        oprot.writeString(kiter971)
++        oprot.writeString(viter972)
        oprot.writeMapEnd()
        oprot.writeFieldEnd()
      if self.o1 is not None:
@@@ -24862,11 -24862,11 +25094,11 @@@ class markPartitionForEvent_args
        elif fid == 3:
          if ftype == TType.MAP:
            self.part_vals = {}
--          (_ktype967, _vtype968, _size966 ) = iprot.readMapBegin()
--          for _i970 in xrange(_size966):
--            _key971 = iprot.readString()
--            _val972 = iprot.readString()
--            self.part_vals[_key971] = _val972
++          (_ktype974, _vtype975, _size973 ) = iprot.readMapBegin()
++          for _i977 in xrange(_size973):
++            _key978 = iprot.readString()
++            _val979 = iprot.readString()
++            self.part_vals[_key978] = _val979
            iprot.readMapEnd()
          else:
            iprot.skip(ftype)
@@@ -24896,9 -24896,9 +25128,9 @@@
      if self.part_vals is not None:
        oprot.writeFieldBegin('part_vals', TType.MAP, 3)
        oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.part_vals))
--      for kiter973,viter974 in self.part_vals.items():
--        oprot.writeString(kiter973)
--        oprot.writeString(viter974)
++      for kiter980,viter981 in self.part_vals.items():
++        oprot.writeString(kiter980)
++        oprot.writeString(viter981)
        oprot.writeMapEnd()
        oprot.writeFieldEnd()
      if self.eventType is not None:
@@@ -25112,11 -25112,11 +25344,11 @@@ class isPartitionMarkedForEvent_args
        elif fid == 3:
          if ftype == TType.MAP:
            self.part_vals = {}
--          (_ktype976, _vtype977, _size975 ) = iprot.readMapBegin()
--          for _i979 in xrange(_size975):
--            _key980 = iprot.readString()
--            _val981 = iprot.readString()
--            self.part_vals[_key980] = _val981
++          (_ktype983, _vtype984, _size982 ) = iprot.readMapBegin()
++          for _i986 in xrange(_size982):
++            _key987 = iprot.readString()
++            _val988 = iprot.readString()
++            self.part_vals[_key987] = _val988
            iprot.readMapEnd()
          else:
            iprot.skip(ftype)
@@@ -25146,9 -25146,9 +25378,9 @@@
      if self.part_vals is not None:
        oprot.writeFieldBegin('part_vals', TType.MAP, 3)
        oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.part_vals))
--      for kiter982,viter983 in self.part_vals.items():
--        oprot.writeString(kiter982)
--        oprot.writeString(viter983)
++      for kiter989,viter990 in self.part_vals.items():
++        oprot.writeString(kiter989)
++        oprot.writeString(viter990)
        oprot.writeMapEnd()
        oprot.writeFieldEnd()
      if self.eventType is not None:
@@@ -26203,11 -26203,11 +26435,11 @@@ class get_indexes_result
        if fid == 0:
          if ftype == TType.LIST:
            self.success = []
--          (_etype987, _size984) = iprot.readListBegin()
--          for _i988 in xrange(_size984):
--            _elem989 = Index()
--            _elem989.read(iprot)
--            self.success.append(_elem989)
++          (_etype994, _size991) = iprot.readListBegin()
++          for _i995 in xrange(_size991):
++            _elem996 = Index()
++            _elem996.read(iprot)
++            self.success.append(_elem996)
            iprot.readListEnd()
          else:
            iprot.skip(ftype)
@@@ -26236,8 -26236,8 +26468,8 @@@
      if self.success is not None:
        oprot.writeFieldBegin('success', TType.LIST, 0)
        oprot.writeListBegin(TType.STRUCT, len(self.success))
--      for iter990 in self.success:
--        iter990.write(oprot)
++      for iter997 in self.success:
++        iter997.write(oprot)
        oprot.writeListEnd()
        oprot.writeFieldEnd()
      if self.o1 is not None:
@@@ -26392,10 -26392,10 +26624,10 @@@ class get_index_names_result
        if fid == 0:
          if ftype == TType.LIST:
            self.success = []
--          (_etype994, _size991) = iprot.readListBegin()
--          for _i995 in xrange(_size991):
--            _elem996 = iprot.readString()
--            self.success.append(_elem996)
++          (_etype1001, _size998) = iprot.readListBegin()
++          for _i1002 in xrange(_size998):
++            _elem1003 = iprot.readString()
++            self.success.append(_elem1003)
            iprot.readListEnd()
          else:
            iprot.skip(ftype)
@@@ -26418,8 -26418,8 +26650,8 @@@
      if self.success is not None:
        oprot.writeFieldBegin('success', TType.LIST, 0)
        oprot.writeListBegin(TType.STRING, len(self.success))
--      for iter997 in self.success:
--        oprot.writeString(iter997)
++      for iter1004 in self.success:
++        oprot.writeString(iter1004)
        oprot.writeListEnd()
        oprot.writeFieldEnd()
      if self.o2 is not None:
@@@ -29285,10 -29285,10 +29517,10 @@@ class get_functions_result
        if fid == 0:
          if ftype == TType.LIST:
            self.success = []
--          (_etype1001, _size998) = iprot.readListBegin()
--          for _i1002 in xrange(_size998):
--            _elem1003 = iprot.readString()
--            self.success.append(_elem1003)
++          (_etype1008, _size1005) = iprot.readListBegin()
++          for _i1009 in xrange(_size1005):
++            _elem1010 = iprot.readString()
++            self.success.append(_elem1010)
            iprot.readListEnd()
          else:
            iprot.skip(ftype)
@@@ -29311,8 -29311,8 +29543,8 @@@
      if self.success is not None:
        oprot.writeFieldBegin('success', TType.LIST, 0)
        oprot.writeListBegin(TType.STRING, len(self.success))
--      for iter1004 in self.success:
--        oprot.writeString(iter1004)
++      for iter1011 in self.success:
++        oprot.writeString(iter1011)
        oprot.writeListEnd()
        oprot.writeFieldEnd()
      if self.o1 is not None:
@@@ -30000,10 -30000,10 +30232,10 @@@ class get_role_names_result
        if fid == 0:
          if ftype == TType.LIST:
            self.success = []
--          (_etype1008, _size1005) = iprot.readListBegin()
--          for _i1009 in xrange(_size1005):
--            _elem1010 = iprot.readString()
--            self.success.append(_elem1010)
++          (_etype1015, _size1012) = iprot.readListBegin()
++          for _i1016 in xrange(_size1012):
++            _elem1017 = iprot.readString()
++            self.success.append(_elem1017)
            iprot.readListEnd()
          else:
            iprot.skip(ftype)
@@@ -30026,8 -30026,8 +30258,8 @@@
      if self.success is not None:
        oprot.writeFieldBegin('success', TType.LIST, 0)
        oprot.writeListBegin(TType.STRING, len(self.success))
--      for iter1011 in self.success:
--        oprot.writeString(iter1011)
++      for iter1018 in self.success:
++        oprot.writeString(iter1018)
        oprot.writeListEnd()
        oprot.writeFieldEnd()
      if self.o1 is not None:
@@@ -30541,11 -30541,11 +30773,11 @@@ class list_roles_result
        if fid == 0:
          if ftype == TType.LIST:
            self.success = []
--          (_etype1015, _size1012) = iprot.readListBegin()
--          for _i1016 in xrange(_size1012):
--            _elem1017 = Role()
--            _elem1017.read(iprot)
--            self.success.append(_elem1017)
++          (_etype1022, _size1019) = iprot.readListBegin()
++          for _i1023 in xrange(_size1019):
++            _elem1024 = Role()
++            _elem1024.read(iprot)
++            self.success.append(_elem1024)
            iprot.readListEnd()
          else:
            iprot.skip(ftype)
@@@ -30568,8 -30568,8 +30800,8 @@@
      if self.success is not None:
        oprot.writeFieldBegin('success', TType.LIST, 0)
        oprot.writeListBegin(TType.STRUCT, len(self.success))
--      for iter1018 in self.success:
--        iter1018.write(oprot)
++      for iter1025 in self.success:
++        iter1025.write(oprot)
        oprot.writeListEnd()
        oprot.writeFieldEnd()
      if self.o1 is not None:
@@@ -31078,10 -31078,10 +31310,10 @@@ class get_privilege_set_args
        elif fid == 3:
          if ftype == TType.LIST:
            self.group_names = []
--          (_etype1022, _size1019) = iprot.readListBegin()
--          for _i1023 in xrange(_size1019):
--            _elem1024 = iprot.readString()
--            self.group_names.append(_elem1024)
++          (_etype1029, _size1026) = iprot.readListBegin()
++          for _i1030 in xrange(_size1026):
++            _elem1031 = iprot.readString()
++            self.group_names.append(_elem1031)
            iprot.readListEnd()
          else:
            iprot.skip(ftype)
@@@ -31106,8 -31106,8 +31338,8 @@@
      if self.group_names is not None:
        oprot.writeFieldBegin('group_names', TType.LIST, 3)
        oprot.writeListBegin(TType.STRING, len(self.group_names))
--      for iter1025 in self.group_names:
--        oprot.writeString(iter1025)
++      for iter1032 in self.group_names:
++        oprot.writeString(iter1032)
        oprot.writeListEnd()
        oprot.writeFieldEnd()
      oprot.writeFieldStop()
@@@ -31334,11 -31334,11 +31566,11 @@@ class list_privileges_result
        if fid == 0:
          if ftype == TType.LIST:
            self.success = []
--          (_etype1029, _size1026) = iprot.readListBegin()
--          for _i1030 in xrange(_size1026):
--            _elem1031 = HiveObjectPrivilege()
--            _elem1031.read(iprot)
--            self.success.append(_elem1031)
++          (_etype1036, _size1033) = iprot.readListBegin()
++          for _i1037 in xrange(_size1033):
++            _elem1038 = HiveObjectPrivilege()
++            _elem1038.read(iprot)
++            self.success.append(_elem1038)
            iprot.readListEnd()
          else:
            iprot.skip(ftype)
@@@ -31361,8 -31361,8 +31593,8 @@@
      if self.success is not None:
        oprot.writeFieldBegin('success', TType.LIST, 0)
        oprot.writeListBegin(TType.STRUCT, len(self.success))
--      for iter1032 in self.success:
--        iter1032.write(oprot)
++      for iter1039 in self.success:
++        iter1039.write(oprot)
        oprot.writeListEnd()
        oprot.writeFieldEnd()
      if self.o1 is not None:
@@@ -31860,10 -31860,10 +32092,10 @@@ class set_ugi_args
        elif fid == 2:
          if ftype == TType.LIST:
            self.group_names = []
--          (_etype1036, _size1033) = iprot.readListBegin()
--          for _i1037 in xrange(_size1033):
--            _elem1038 = iprot.readString()
--            self.group_names.append(_elem1038)
++          (_etype1043, _size1040) = iprot.readListBegin()
++          for _i1044 in xrange(_size1040):
++            _elem1045 = iprot.readString()
++            self.group_names.append(_elem1045)
            iprot.readListEnd()
          else:
            iprot.skip(ftype)
@@@ -31884,8 -31884,8 +32116,8 @@@
      if self.group_names is not None:
        oprot.writeFieldBegin('group_names', TType.LIST, 2)
        oprot.writeListBegin(TType.STRING, len(self.group_names))
--      for iter1039 in self.group_names:
--        oprot.writeString(iter1039)
++      for iter1046 in self.group_names:
++        oprot.writeString(iter1046)
        oprot.writeListEnd()
        oprot.writeFieldEnd()
      oprot.writeFieldStop()
@@@ -31940,10 -31940,10 +32172,10 @@@ class set_ugi_result
        if fid == 0:
          if ftype == TType.LIST:
            self.success = []
--          (_etype1043, _size1040) = iprot.readListBegin()
--          for _i1044 in xrange(_size1040):
--            _elem1045 = iprot.readString()
--            self.success.append(_elem1045)
++          (_etype1050, _size1047) = iprot.readListBegin()
++          for _i1051 in xrange(_size1047):
++            _elem1052 = iprot.readString()
++            self.success.append(_elem1052)
            iprot.readListEnd()
          else:
            iprot.skip(ftype)
@@@ -31966,8 -31966,8 +32198,8 @@@
      if self.success is not None:
        oprot.writeFieldBegin('success', TType.LIST, 0)
        oprot.writeListBegin(TType.STRING, len(self.success))
--      for iter1046 in self.success:
--        oprot.writeString(iter1046)
++      for iter1053 in self.success:
++        oprot.writeString(iter1053)
        oprot.writeListEnd()
        oprot.writeFieldEnd()
      if self.o1 is not None:
@@@ -32899,10 -32899,10 +33131,10 @@@ class get_all_token_identifiers_result
        if fid == 0:
          if ftype == TType.LIST:
            self.success = []
--          (_etype1050, _size1047) = iprot.readListBegin()
--          for _i1051 in xrange(_size1047):
--            _elem1052 = iprot.readString()
--            self.success.append(_elem1052)
++          (_etype1057, _size1054) = iprot.readListBegin()
++          for _i1058 in xrange(_size1054):
++            _elem1059 = iprot.readString()
++            self.success.append(_elem1059)
            iprot.readListEnd()
          else:
            iprot.skip(ftype)
@@@ -32919,8 -32919,8 +33151,8 @@@
      if self.success is not None:
        oprot.writeFieldBegin('success', TType.LIST, 0)
        oprot.writeListBegin(TType.STRING, len(self.success))
--      for iter1053 in self.success:
--        oprot.writeString(iter1053)
++      for iter1060 in self.success:
++        oprot.writeString(iter1060)
        oprot.writeListEnd()
        oprot.writeFieldEnd()
      oprot.writeFieldStop()
@@@ -33447,10 -33447,10 +33679,10 @@@ class get_master_keys_result
        if fid == 0:
          if ftype == TType.LIST:
            self.success = []
--          (_etype1057, _size1054) = iprot.readListBegin()
--          for _i1058 in xrange(_size1054):
--            _elem1059 = iprot.readString()
--            self.success.append(_elem1059)
++          (_etype1064, _size1061) = iprot.readListBegin()
++          for _i1065 in xrange(_size1061):
++            _elem1066 = iprot.readString()
++            self.success.append(_elem1066)
            iprot.readListEnd()
          else:
            iprot.skip(ftype)
@@@ -33467,8 -33467,8 +33699,8 @@@
      if self.success is not None:
        oprot.writeFieldBegin('success', TType.LIST, 0)
        oprot.writeListBegin(TType.STRING, len(self.success))
--      for iter1060 in self.success:
--        oprot.writeString(iter1060)
++      for iter1067 in self.success:
++        oprot.writeString(iter1067)
        oprot.writeListEnd()
        oprot.writeFieldEnd()
      oprot.writeFieldStop()

http://git-wip-us.apache.org/repos/asf/hive/blob/1ceaf357/metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/1ceaf357/metastore/src/gen/thrift/gen-rb/hive_metastore_types.rb
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/1ceaf357/metastore/src/gen/thrift/gen-rb/thrift_hive_metastore.rb
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/1ceaf357/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
----------------------------------------------------------------------
diff --cc metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
index 915bce3,cbcfc72..ff3505a
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
@@@ -78,7 -81,7 +82,8 @@@ import org.apache.hadoop.hive.common.me
  import org.apache.hadoop.hive.common.metrics.common.MetricsVariable;
  import org.apache.hadoop.hive.conf.HiveConf;
  import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
+ import org.apache.hadoop.hive.io.HdfsUtils;
 +import org.apache.hadoop.hive.metastore.TableType;
  import org.apache.hadoop.hive.metastore.api.*;
  import org.apache.hadoop.hive.metastore.events.AddIndexEvent;
  import org.apache.hadoop.hive.metastore.events.AddPartitionEvent;
@@@ -114,7 -117,7 +119,8 @@@ import org.apache.hadoop.hive.metastore
  import org.apache.hadoop.hive.metastore.events.PreReadDatabaseEvent;
  import org.apache.hadoop.hive.metastore.events.PreReadTableEvent;
  import org.apache.hadoop.hive.metastore.filemeta.OrcFileMetadataHandler;
+ import org.apache.hadoop.hive.metastore.messaging.EventMessage.EventType;
 +import org.apache.hadoop.hive.metastore.model.MTableWrite;
  import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy;
  import org.apache.hadoop.hive.metastore.txn.TxnStore;
  import org.apache.hadoop.hive.metastore.txn.TxnUtils;
@@@ -7535,4 -7541,4 +7708,4 @@@ public class HiveMetaStore extends Thri
      }
      return fmHandlers;
    }
--}
++}

http://git-wip-us.apache.org/repos/asf/hive/blob/1ceaf357/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/1ceaf357/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/1ceaf357/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/1ceaf357/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java
----------------------------------------------------------------------
diff --cc metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java
index 6259cda,870896c..e1c59ec
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java
@@@ -1933,59 -1930,27 +1933,82 @@@ public class MetaStoreUtils 
      csNew.setStatsObj(list);
    }
  
+   /**
+    * Converts an Exception to a MetaException, setting the given exception as its cause.
+    * @param e cause of the exception
+    * @return  the MetaException with the specified exception as the cause
+    */
+   public static MetaException newMetaException(Exception e) {
+     return newMetaException(e != null ? e.getMessage() : null, e);
+   }
+ 
+   /**
+    * Converts an Exception to a MetaException, setting the given exception as its cause.
+    * @param errorMessage  the error message for this MetaException
+    * @param e             cause of the exception
+    * @return  the MetaException with the specified exception as the cause
+    */
+   public static MetaException newMetaException(String errorMessage, Exception e) {
+     MetaException metaException = new MetaException(errorMessage);
+     if (e != null) {
+       metaException.initCause(e);
+     }
+     return metaException;
+   }
+ 
 +  // TODO The following two utility methods can be moved to AcidUtils once no class in metastore is relying on them,
 +  // right now ObjectStore.getAllMmTablesForCleanup is calling these methods
 +  /**
 +   * Checks if a table is an ACID table that only supports INSERT, but not UPDATE/DELETE
 +   * @param params table properties
 +   * @return true if table is an INSERT_ONLY table, false otherwise
 +   */
 +  // TODO# also check that transactional is true
 +  public static boolean isInsertOnlyTable(Map<String, String> params) {
 +    return isInsertOnlyTable(params, false);
 +  }
 +
 +  public static boolean isInsertOnlyTable(Map<String, String> params, boolean isCtas) {
 +    String transactionalProp = params.get(hive_metastoreConstants.TABLE_TRANSACTIONAL_PROPERTIES);
 +    return (transactionalProp != null && "insert_only".equalsIgnoreCase(transactionalProp));
 +  }
 +
 +  public static boolean isInsertOnlyTable(Properties params) {
 +    // TODO#  redirect for now - fix before merge
 +    HashMap<String, String> testMap = new HashMap<String, String>();
 +    for (String n  : params.stringPropertyNames()) {
 +      testMap.put(n, params.getProperty(n));
 +    }
 +    return isInsertOnlyTable(testMap);
 +  }
 +
 +  /** Used when altering table props; the change may convert the table to MM, to non-MM, or leave MM status untouched. */
 +  public static Boolean isToInsertOnlyTable(Map<String, String> props) {
 +    // TODO# Setting these separately is a very hairy issue in certain combinations, since we
 +    //       cannot decide what type of table this becomes without taking both into account, and
 +    //       in many cases the conversion might be illegal.
 +    //       The only thing we allow is tx = true w/o tx-props, for backward compat.
 +    String transactional = props.get(hive_metastoreConstants.TABLE_IS_TRANSACTIONAL);
 +    String transactionalProp = props.get(hive_metastoreConstants.TABLE_TRANSACTIONAL_PROPERTIES);
 +    if (transactional == null && transactionalProp == null) return null; // Not affected.
 +    boolean isSetToTxn = "true".equalsIgnoreCase(transactional);
 +    if (transactionalProp == null) {
 +      if (isSetToTxn) return false; // Assume the full ACID table.
 +      throw new RuntimeException("Cannot change '" + hive_metastoreConstants.TABLE_IS_TRANSACTIONAL
 +          + "' without '" + hive_metastoreConstants.TABLE_TRANSACTIONAL_PROPERTIES + "'");
 +    }
 +    if (!"insert_only".equalsIgnoreCase(transactionalProp)) return false; // Not MM.
 +    if (!isSetToTxn) {
 +      throw new RuntimeException("Cannot set '"
 +          + hive_metastoreConstants.TABLE_TRANSACTIONAL_PROPERTIES + "' to 'insert_only' without "
 +          + "setting '" + hive_metastoreConstants.TABLE_IS_TRANSACTIONAL + "' to 'true'");
 +    }
 +    return true;
 +  }
 +
 +  public static boolean isRemovedInsertOnlyTable(Set<String> removedSet) {
 +    boolean hasTxn = removedSet.contains(hive_metastoreConstants.TABLE_IS_TRANSACTIONAL),
 +        hasProps = removedSet.contains(hive_metastoreConstants.TABLE_TRANSACTIONAL_PROPERTIES);
 +    return hasTxn || hasProps;
 +  }
  }
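
A quick caller-side sketch of the new insert-only (MM) helpers added to MetaStoreUtils above. The class name, the property map and the print below are illustrative only and not part of this patch; the helper and constant names are taken straight from the diff:

    import java.util.HashMap;
    import java.util.Map;

    import org.apache.hadoop.hive.metastore.MetaStoreUtils;
    import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants;

    public class MmTablePropsExample {
      public static void main(String[] args) {
        Map<String, String> params = new HashMap<>();
        params.put(hive_metastoreConstants.TABLE_IS_TRANSACTIONAL, "true");
        params.put(hive_metastoreConstants.TABLE_TRANSACTIONAL_PROPERTIES, "insert_only");

        // true when transactional_properties is set to insert_only (an MM table).
        boolean isMm = MetaStoreUtils.isInsertOnlyTable(params);

        // For ALTER TABLE property changes: true = becomes MM, false = becomes non-MM,
        // null = the change does not touch MM status; unsupported combinations throw.
        Boolean toMm = MetaStoreUtils.isToInsertOnlyTable(params);

        System.out.println("isInsertOnlyTable=" + isMm + ", isToInsertOnlyTable=" + toMm);
      }
    }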

http://git-wip-us.apache.org/repos/asf/hive/blob/1ceaf357/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java
----------------------------------------------------------------------
diff --cc metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java
index 51bc6d0,a83e12e..c351ffd
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java
@@@ -8790,192 -8460,43 +8556,232 @@@ public class ObjectStore implements Raw
      }
    }
  
+   /**
+    * This is a cleanup method used to roll back an active transaction if the success flag is
+    * false and to close the associated Query object. This method is used internally and is
+    * visible for testing purposes only
+    * @param success Rollback the current active transaction if false
+    * @param query Query object which needs to be closed
+    */
+   @VisibleForTesting
+   void rollbackAndCleanup(boolean success, Query query) {
+     try {
 -      if(!success) {
++      if (!success) {
+         rollbackTransaction();
+       }
+     } finally {
+       if (query != null) {
+         query.closeAll();
+       }
+     }
+   }
+ 
+   /**
+    * This is a cleanup method used to roll back an active transaction if the success flag is
+    * false and to close the associated QueryWrapper object. This method is used internally and
+    * is visible for testing purposes only
+    * @param success Rollback the current active transaction if false
+    * @param queryWrapper QueryWrapper object which needs to be closed
+    */
+   @VisibleForTesting
+   void rollbackAndCleanup(boolean success, QueryWrapper queryWrapper) {
+     try {
 -      if(!success) {
++      if (!success) {
+         rollbackTransaction();
+       }
+     } finally {
+       if (queryWrapper != null) {
+         queryWrapper.close();
+       }
+     }
+   }
++
 +  @Override
 +  public void createTableWrite(Table tbl, long writeId, char state, long heartbeat) {
 +    boolean success = false;
 +    openTransaction();
 +    try {
 +      MTable mtbl = getMTable(tbl.getDbName(), tbl.getTableName());
 +      MTableWrite tw = new MTableWrite(mtbl, writeId, String.valueOf(state), heartbeat, heartbeat);
 +      pm.makePersistent(tw);
 +      success = true;
 +    } finally {
 +      if (success) {
 +        commitTransaction();
 +      } else {
 +        rollbackTransaction();
 +      }
 +    }
 +  }
 +
 +  @Override
 +  public void updateTableWrite(MTableWrite tw) {
 +    boolean success = false;
 +    openTransaction();
 +    try {
 +      pm.makePersistent(tw);
 +      success = true;
 +    } finally {
 +      if (success) {
 +        commitTransaction();
 +      } else {
 +        rollbackTransaction();
 +      }
 +    }
 +  }
 +
 +  @Override
 +  public MTableWrite getTableWrite(
 +      String dbName, String tblName, long writeId) throws MetaException {
 +    boolean success = false;
 +    Query query = null;
 +    openTransaction();
 +    try {
 +      query = pm.newQuery(MTableWrite.class,
 +              "table.tableName == t1 && table.database.name == t2 && writeId == t3");
 +      query.declareParameters("java.lang.String t1, java.lang.String t2, java.lang.Long t3");
 +      @SuppressWarnings("unchecked")
 +      List<MTableWrite> writes = (List<MTableWrite>) query.execute(tblName, dbName, writeId);
 +      pm.retrieveAll(writes);
 +      success = true;
 +      if (writes == null || writes.isEmpty()) return null;
 +      if (writes.size() > 1) {
 +        throw new MetaException(
 +            "More than one TableWrite for " + dbName + "." + tblName + " and " + writeId);
 +      }
 +      return writes.get(0);
 +    } finally {
 +      closeTransaction(success, query);
 +    }
 +  }
 +
 +  @Override
 +  public List<Long> getTableWriteIds(String dbName, String tblName,
 +      long watermarkId, long nextWriteId, char state) throws MetaException {
 +    boolean success = false;
 +    Query query = null;
 +    openTransaction();
 +    try {
 +      boolean hasState = (state != '\0');
 +      query = pm.newQuery("select writeId from org.apache.hadoop.hive.metastore.model.MTableWrite"
 +          + " where table.tableName == t1 && table.database.name == t2 && writeId > t3"
 +          + " && writeId < t4" + (hasState ? " && state == t5" : ""));
 +      query.declareParameters("java.lang.String t1, java.lang.String t2, java.lang.Long t3, "
 +          + "java.lang.Long t4" + (hasState ? ", java.lang.String t5" : ""));
 +      query.setResult("writeId");
 +      query.setOrdering("writeId asc");
 +      @SuppressWarnings("unchecked")
 +      List<Long> writes = (List<Long>) (hasState
 +          ? query.executeWithArray(tblName, dbName, watermarkId, nextWriteId, String.valueOf(state))
 +          : query.executeWithArray(tblName, dbName, watermarkId, nextWriteId));
 +      success = true;
 +      return (writes == null) ? new ArrayList<Long>() : new ArrayList<>(writes);
 +    } finally {
 +      closeTransaction(success, query);
 +    }
 +  }
 +
 +  @Override
 +  public List<MTableWrite> getTableWrites(
 +      String dbName, String tblName, long from, long to) throws MetaException {
 +    boolean success = false;
 +    dbName = HiveStringUtils.normalizeIdentifier(dbName);
 +    tblName = HiveStringUtils.normalizeIdentifier(tblName);
 +    Query query = null;
 +    openTransaction();
 +    try {
 +      String queryStr = "table.tableName == t1 && table.database.name == t2 && writeId > t3",
 +          argStr = "java.lang.String t1, java.lang.String t2, java.lang.Long t3";
 +      if (to >= 0) {
 +        queryStr += " && writeId < t4";
 +        argStr += ", java.lang.Long t4";
 +      }
 +      query = pm.newQuery(MTableWrite.class, queryStr);
 +      query.declareParameters(argStr);
 +      query.setOrdering("writeId asc");
 +      @SuppressWarnings("unchecked")
 +      List<MTableWrite> writes = (List<MTableWrite>)(to >= 0
 +         ? query.executeWithArray(tblName, dbName, from, to)
 +         : query.executeWithArray(tblName, dbName, from));
 +      pm.retrieveAll(writes);
 +      success = true;
 +      return (writes == null || writes.isEmpty()) ? null : new ArrayList<>(writes);
 +    } finally {
 +      closeTransaction(success, query);
 +    }
 +  }
 +
 +
 +  @Override
 +  public void deleteTableWrites(
 +      String dbName, String tblName, long from, long to) throws MetaException {
 +    boolean success = false;
 +    Query query = null;
 +    openTransaction();
 +    try {
 +      query = pm.newQuery(MTableWrite.class,
 +          "table.tableName == t1 && table.database.name == t2 && writeId > t3 && writeId < t4");
 +      query.declareParameters(
 +          "java.lang.String t1, java.lang.String t2, java.lang.Long t3, java.lang.Long t4");
 +      query.deletePersistentAll(tblName, dbName, from, to);
 +      success = true;
 +    } finally {
 +      closeTransaction(success, query);
 +    }
 +  }
 +
 +  @Override
 +  public List<FullTableName > getAllMmTablesForCleanup() throws MetaException {
 +    boolean success = false;
 +    Query query = null;
 +    openTransaction();
 +    try {
 +      // If the table had no MM writes, there's nothing to clean up
 +      query = pm.newQuery(MTable.class, "mmNextWriteId > 0");
 +      @SuppressWarnings("unchecked")
 +      List<MTable> tables = (List<MTable>) query.execute();
 +      pm.retrieveAll(tables);
 +      ArrayList<FullTableName> result = new ArrayList<>(tables.size());
 +      for (MTable table : tables) {
 +        if (MetaStoreUtils.isInsertOnlyTable(table.getParameters())) {
 +          result.add(new FullTableName(table.getDatabase().getName(), table.getTableName()));
 +        }
 +      }
 +      success = true;
 +      return result;
 +    } finally {
 +      closeTransaction(success, query);
 +    }
 +  }
 +
 +  @Override
 +  public Collection<String> getAllPartitionLocations(String dbName, String tblName) {
 +    boolean success = false;
 +    Query query = null;
 +    openTransaction();
 +    try {
 +      String queryStr = "select sd.location from org.apache.hadoop.hive.metastore.model.MPartition"
 +          + " where table.tableName == t1 && table.database.name == t2";
 +      query = pm.newQuery(queryStr);
 +      query.declareParameters("java.lang.String t1, java.lang.String t2");
 +      @SuppressWarnings("unchecked")
 +      List<String> locations = (List<String>) query.execute(tblName, dbName);
 +      pm.retrieveAll(locations);
 +      success = true;
 +      return new ArrayList<>(locations);
 +    } finally {
 +      closeTransaction(success, query);
 +    }
 +  }
 +
 +  private void closeTransaction(boolean success, Query query) {
 +    if (success) {
 +      commitTransaction();
 +    } else {
 +      rollbackTransaction();
 +    }
 +    if (query != null) {
 +      query.closeAll();
 +    }
 +  }
  }

http://git-wip-us.apache.org/repos/asf/hive/blob/1ceaf357/metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/1ceaf357/metastore/src/java/org/apache/hadoop/hive/metastore/Warehouse.java
----------------------------------------------------------------------
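
For reference, a minimal JDOQL sketch of the retrieval pattern the new write-ID methods follow:
build a parameterized single-string query against the MTableWrite model, execute it, copy the
lazy result list out, and close the query. The MTableWrite class and field names come from the
diff above; the PersistenceManager wiring and ObjectStore's transaction handling are assumed
and omitted here, so treat this as a sketch rather than the committed code.

import java.util.ArrayList;
import java.util.List;

import javax.jdo.PersistenceManager;
import javax.jdo.Query;

public class TableWriteQuerySketch {

  public static List<Long> writeIdsInState(PersistenceManager pm, String dbName, String tblName,
      long watermarkId, long nextWriteId, char state) {
    Query query = null;
    try {
      // Single-string JDOQL result query, as in getTableWriteIds() above.
      query = pm.newQuery("select writeId from org.apache.hadoop.hive.metastore.model.MTableWrite"
          + " where table.tableName == t1 && table.database.name == t2"
          + " && writeId > t3 && writeId < t4 && state == t5");
      query.declareParameters("java.lang.String t1, java.lang.String t2, java.lang.Long t3,"
          + " java.lang.Long t4, java.lang.String t5");
      query.setOrdering("writeId asc");
      @SuppressWarnings("unchecked")
      List<Long> writeIds = (List<Long>) query.executeWithArray(
          tblName, dbName, watermarkId, nextWriteId, String.valueOf(state));
      // Copy the results before the query is closed; JDO result lists are lazy.
      return (writeIds == null) ? new ArrayList<Long>() : new ArrayList<>(writeIds);
    } finally {
      if (query != null) {
        query.closeAll();
      }
    }
  }
}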


[12/50] [abbrv] hive git commit: HIVE-16552: Limit the number of tasks a Spark job may contain (Reviewed by Rui)

Posted by we...@apache.org.
HIVE-16552: Limit the number of tasks a Spark job may contain (Reviewed by Rui)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/c6b5ad66
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/c6b5ad66
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/c6b5ad66

Branch: refs/heads/hive-14535
Commit: c6b5ad663d235c15fc5bb5a24a1d3e9ac0d05140
Parents: 9e9356b
Author: Xuefu Zhang <xu...@uber.com>
Authored: Thu May 4 09:31:28 2017 -0700
Committer: Xuefu Zhang <xu...@uber.com>
Committed: Thu May 4 09:31:28 2017 -0700

----------------------------------------------------------------------
 .../org/apache/hadoop/hive/conf/HiveConf.java   |  2 +
 .../test/resources/testconfiguration.properties |  4 +-
 .../hadoop/hive/cli/control/CliConfigs.java     |  1 +
 .../hadoop/hive/ql/exec/spark/SparkTask.java    |  6 ++
 .../spark/status/RemoteSparkJobMonitor.java     | 15 +++-
 .../ql/exec/spark/status/SparkJobMonitor.java   | 10 ++-
 .../clientnegative/spark_job_max_tasks.q        |  6 ++
 .../spark/spark_job_max_tasks.q.out             | 77 ++++++++++++++++++++
 8 files changed, 118 insertions(+), 3 deletions(-)
----------------------------------------------------------------------
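
A hedged sketch of the enforcement this change adds: the monitor sums the task counts across
all stages once, and if a configured positive limit is exceeded the job is flagged for
cancellation with return code 4 (which SparkTask maps to jobRef.cancelJob()). The map of
per-stage counts below is a simplified stand-in for the SparkStageProgress objects and the
HiveConf lookup used in the diffs that follow.

import java.util.HashMap;
import java.util.Map;

public final class TaskLimitCheck {

  private TaskLimitCheck() {}

  // Returns 4 (the cancellation return code used by SparkTask) if the limit is exceeded, else 0.
  public static int check(Map<String, Integer> totalTasksPerStage, int maxTasks) {
    if (maxTasks < 0) {
      return 0;                        // -1, the default, means no limit
    }
    int total = 0;
    for (int stageTasks : totalTasksPerStage.values()) {
      total += stageTasks;             // mirrors getTotalTaskCount() over the progress map
    }
    return (total > maxTasks) ? 4 : 0;
  }

  public static void main(String[] args) {
    Map<String, Integer> stages = new HashMap<>();
    stages.put("Stage-1", 2);
    stages.put("Stage-2", 1);
    System.out.println(check(stages, 2));   // prints 4: three tasks exceed a limit of two
  }
}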


http://git-wip-us.apache.org/repos/asf/hive/blob/c6b5ad66/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
----------------------------------------------------------------------
diff --git a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
index 84398c6..99c26ce 100644
--- a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
+++ b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
@@ -3350,6 +3350,8 @@ public class HiveConf extends Configuration {
         "hive.spark.use.groupby.shuffle", true,
         "Spark groupByKey transformation has better performance but uses unbounded memory." +
             "Turn this off when there is a memory issue."),
+    SPARK_JOB_MAX_TASKS("hive.spark.job.max.tasks", -1, "The maximum number of tasks a Spark job may have.\n" +
+            "If a Spark job contains more tasks than the maximum, it will be cancelled. A value of -1 means no limit."),
     NWAYJOINREORDER("hive.reorder.nway.joins", true,
       "Runs reordering of tables within single n-way join (i.e.: picks streamtable)"),
     HIVE_MERGE_NWAY_JOINS("hive.merge.nway.joins", true,

http://git-wip-us.apache.org/repos/asf/hive/blob/c6b5ad66/itests/src/test/resources/testconfiguration.properties
----------------------------------------------------------------------
diff --git a/itests/src/test/resources/testconfiguration.properties b/itests/src/test/resources/testconfiguration.properties
index 753f3a9..5ab3076 100644
--- a/itests/src/test/resources/testconfiguration.properties
+++ b/itests/src/test/resources/testconfiguration.properties
@@ -1445,4 +1445,6 @@ spark.query.negative.files=groupby2_map_skew_multi_distinct.q,\
   groupby2_multi_distinct.q,\
   groupby3_map_skew_multi_distinct.q,\
   groupby3_multi_distinct.q,\
-  groupby_grouping_sets7.q
+  groupby_grouping_sets7.q,\
+  spark_job_max_tasks.q
+

http://git-wip-us.apache.org/repos/asf/hive/blob/c6b5ad66/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CliConfigs.java
----------------------------------------------------------------------
diff --git a/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CliConfigs.java b/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CliConfigs.java
index 67064b8..1457db0 100644
--- a/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CliConfigs.java
+++ b/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CliConfigs.java
@@ -287,6 +287,7 @@ public class CliConfigs {
 
         excludesFrom(testConfigProps, "minimr.query.negative.files");
         excludeQuery("authorization_uri_import.q");
+        excludeQuery("spark_job_max_tasks.q");
 
         setResultsDir("ql/src/test/results/clientnegative");
         setLogDir("itests/qtest/target/qfile-results/clientnegative");

http://git-wip-us.apache.org/repos/asf/hive/blob/c6b5ad66/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/SparkTask.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/SparkTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/SparkTask.java
index 32a7730..98b1605 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/SparkTask.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/SparkTask.java
@@ -129,8 +129,14 @@ public class SparkTask extends Task<SparkWork> {
         // TODO: If the timeout is because of lack of resources in the cluster, we should
         // ideally also cancel the app request here. But w/o facilities from Spark or YARN,
         // it's difficult to do it on hive side alone. See HIVE-12650.
+        LOG.info("Failed to submit Spark job " + sparkJobID);
+        jobRef.cancelJob();
+      } else if (rc == 4) {
+        LOG.info("The number of tasks exceeds the limit " + conf.getIntVar(HiveConf.ConfVars.SPARK_JOB_MAX_TASKS) +
+            ". Cancelling Spark job " + sparkJobID + " with application ID " + jobID);
         jobRef.cancelJob();
       }
+
       if (this.jobID == null) {
         this.jobID = sparkJobStatus.getAppID();
       }

http://git-wip-us.apache.org/repos/asf/hive/blob/c6b5ad66/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/status/RemoteSparkJobMonitor.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/status/RemoteSparkJobMonitor.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/status/RemoteSparkJobMonitor.java
index dd73f3e..9dfb65e 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/status/RemoteSparkJobMonitor.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/status/RemoteSparkJobMonitor.java
@@ -34,7 +34,8 @@ import org.apache.spark.JobExecutionStatus;
  * It print current job status to console and sleep current thread between monitor interval.
  */
 public class RemoteSparkJobMonitor extends SparkJobMonitor {
-
+  private int sparkJobMaxTaskCount = -1;
+  private int totalTaskCount = 0;
   private RemoteSparkJobStatus sparkJobStatus;
   private final HiveConf hiveConf;
 
@@ -42,6 +43,7 @@ public class RemoteSparkJobMonitor extends SparkJobMonitor {
     super(hiveConf);
     this.sparkJobStatus = sparkJobStatus;
     this.hiveConf = hiveConf;
+    sparkJobMaxTaskCount = hiveConf.getIntVar(HiveConf.ConfVars.SPARK_JOB_MAX_TASKS);
   }
 
   @Override
@@ -100,6 +102,17 @@ public class RemoteSparkJobMonitor extends SparkJobMonitor {
               } else {
                 console.logInfo(format);
               }
+            } else {
+              // Count the number of tasks, and kill application if it goes beyond the limit.
+              if (sparkJobMaxTaskCount != -1 && totalTaskCount == 0) {
+                totalTaskCount = getTotalTaskCount(progressMap);
+                if (totalTaskCount > sparkJobMaxTaskCount) {
+                  rc = 4;
+                  done = true;
+                  console.printInfo("\nThe total number of tasks in the Spark job [" + totalTaskCount + "] is greater than the limit [" +
+                      sparkJobMaxTaskCount + "]. The Spark job will be cancelled.");
+                }
+              }
             }
 
             printStatus(progressMap, lastProgressMap);

http://git-wip-us.apache.org/repos/asf/hive/blob/c6b5ad66/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/status/SparkJobMonitor.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/status/SparkJobMonitor.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/status/SparkJobMonitor.java
index 0b224f2..41730b5 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/status/SparkJobMonitor.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/status/SparkJobMonitor.java
@@ -66,7 +66,6 @@ abstract class SparkJobMonitor {
   private int lines = 0;
   private final PrintStream out;
 
-
   private static final int COLUMN_1_WIDTH = 16;
   private static final String HEADER_FORMAT = "%16s%10s %13s  %5s  %9s  %7s  %7s  %6s  ";
   private static final String STAGE_FORMAT = "%-16s%10s %13s  %5s  %9s  %7s  %7s  %6s  ";
@@ -173,6 +172,15 @@ abstract class SparkJobMonitor {
     lastPrintTime = System.currentTimeMillis();
   }
 
+  protected int getTotalTaskCount(Map<String, SparkStageProgress> progressMap) {
+    int totalTasks = 0;
+    for (SparkStageProgress progress : progressMap.values()) {
+      totalTasks += progress.getTotalTaskCount();
+    }
+
+    return totalTasks;
+  }
+
   private String getReport(Map<String, SparkStageProgress> progressMap) {
     StringBuilder reportBuffer = new StringBuilder();
     SimpleDateFormat dt = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss,SSS");

http://git-wip-us.apache.org/repos/asf/hive/blob/c6b5ad66/ql/src/test/queries/clientnegative/spark_job_max_tasks.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/spark_job_max_tasks.q b/ql/src/test/queries/clientnegative/spark_job_max_tasks.q
new file mode 100644
index 0000000..7473050
--- /dev/null
+++ b/ql/src/test/queries/clientnegative/spark_job_max_tasks.q
@@ -0,0 +1,6 @@
+set hive.spark.job.max.tasks=2;
+
+EXPLAIN
+SELECT key, sum(value) AS s FROM src1 GROUP BY key ORDER BY s;
+
+SELECT key, sum(value) AS s FROM src1 GROUP BY key ORDER BY s;

http://git-wip-us.apache.org/repos/asf/hive/blob/c6b5ad66/ql/src/test/results/clientnegative/spark/spark_job_max_tasks.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientnegative/spark/spark_job_max_tasks.q.out b/ql/src/test/results/clientnegative/spark/spark_job_max_tasks.q.out
new file mode 100644
index 0000000..ba2f09e
--- /dev/null
+++ b/ql/src/test/results/clientnegative/spark/spark_job_max_tasks.q.out
@@ -0,0 +1,77 @@
+PREHOOK: query: EXPLAIN
+SELECT key, sum(value) AS s FROM src1 GROUP BY key ORDER BY s
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN
+SELECT key, sum(value) AS s FROM src1 GROUP BY key ORDER BY s
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Spark
+      Edges:
+        Reducer 2 <- Map 1 (GROUP, 2)
+        Reducer 3 <- Reducer 2 (SORT, 1)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: src1
+                  Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: key (type: string), value (type: string)
+                    outputColumnNames: key, value
+                    Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
+                    Group By Operator
+                      aggregations: sum(value)
+                      keys: key (type: string)
+                      mode: hash
+                      outputColumnNames: _col0, _col1
+                      Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: string)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: string)
+                        Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
+                        value expressions: _col1 (type: double)
+        Reducer 2 
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: sum(VALUE._col0)
+                keys: KEY._col0 (type: string)
+                mode: mergepartial
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 12 Data size: 91 Basic stats: COMPLETE Column stats: NONE
+                Reduce Output Operator
+                  key expressions: _col1 (type: double)
+                  sort order: +
+                  Statistics: Num rows: 12 Data size: 91 Basic stats: COMPLETE Column stats: NONE
+                  value expressions: _col0 (type: string)
+        Reducer 3 
+            Reduce Operator Tree:
+              Select Operator
+                expressions: VALUE._col0 (type: string), KEY.reducesinkkey0 (type: double)
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 12 Data size: 91 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 12 Data size: 91 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: SELECT key, sum(value) AS s FROM src1 GROUP BY key ORDER BY s
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src1
+#### A masked pattern was here ####
+FAILED: Execution Error, return code 4 from org.apache.hadoop.hive.ql.exec.spark.SparkTask


[14/50] [abbrv] hive git commit: HIVE-16449: BeeLineDriver should handle query result sorting (Peter Vary via Zoltan Haindrich)

Posted by we...@apache.org.
HIVE-16449: BeeLineDriver should handle query result sorting (Peter Vary via Zoltan Haindrich)

Signed-off-by: Zoltan Haindrich <ki...@rxd.hu>


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/44804d82
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/44804d82
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/44804d82

Branch: refs/heads/hive-14535
Commit: 44804d82f5c1226b0247680954fdd22fc3b200bd
Parents: f8f9155
Author: Peter Vary <pv...@cloudera.com>
Authored: Fri May 5 13:01:31 2017 +0200
Committer: Zoltan Haindrich <ki...@rxd.hu>
Committed: Fri May 5 13:02:01 2017 +0200

----------------------------------------------------------------------
 .../java/org/apache/hive/beeline/Commands.java  | 12 +++
 .../org/apache/hive/beeline/OutputFile.java     | 74 +++++++++++++--
 .../hive/cli/control/CoreBeeLineDriver.java     |  4 +-
 .../hive/beeline/ConvertedOutputFile.java       | 94 ++++++++++++++++++++
 .../java/org/apache/hive/beeline/QFile.java     | 17 ++++
 .../apache/hive/beeline/QFileBeeLineClient.java | 20 +++--
 .../clientpositive/beeline/smb_mapjoin_1.q.out  |  8 +-
 .../clientpositive/beeline/smb_mapjoin_2.q.out  | 16 ++--
 .../clientpositive/beeline/smb_mapjoin_3.q.out  | 28 +++---
 9 files changed, 232 insertions(+), 41 deletions(-)
----------------------------------------------------------------------
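
A hedged sketch of the conversion mechanism introduced here: in test mode the record output
stream is wrapped in a FetchConverter (for example a SortPrintStream), rows are buffered between
fetchStarted() and fetchFinished() while foundQuery(true) is in effect, and are only emitted,
sorted, once the fetch completes. SortPrintStream and FetchConverter are the classes named in
the diff; the rest of the wiring (BeeLine, the !record file) is simplified away.

import java.io.PrintStream;

import org.apache.hadoop.hive.common.io.FetchConverter;
import org.apache.hadoop.hive.common.io.SortPrintStream;

public class SortedResultSketch {
  public static void main(String[] args) throws Exception {
    PrintStream target = System.out;                 // stands in for the !record output file stream
    FetchConverter out = new SortPrintStream(target, "UTF-8");

    out.fetchStarted();                              // start buffering instead of writing through
    out.foundQuery(true);                            // the rows that follow are a query result
    out.println("10\tval_10\tNULL\tNULL");
    out.println("1\tval_1\tNULL\tNULL");
    out.fetchFinished();                             // flush the buffered rows in sorted order
  }
}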


http://git-wip-us.apache.org/repos/asf/hive/blob/44804d82/beeline/src/java/org/apache/hive/beeline/Commands.java
----------------------------------------------------------------------
diff --git a/beeline/src/java/org/apache/hive/beeline/Commands.java b/beeline/src/java/org/apache/hive/beeline/Commands.java
index 08d53ca..407e018 100644
--- a/beeline/src/java/org/apache/hive/beeline/Commands.java
+++ b/beeline/src/java/org/apache/hive/beeline/Commands.java
@@ -1003,6 +1003,15 @@ public class Commands {
         beeLine.showWarnings();
 
         if (hasResults) {
+          OutputFile outputFile = beeLine.getRecordOutputFile();
+          if (beeLine.isTestMode() && outputFile != null && outputFile.isActiveConverter()) {
+            outputFile.fetchStarted();
+            if (!sql.trim().toLowerCase().startsWith("explain")) {
+              outputFile.foundQuery(true);
+            } else {
+              outputFile.foundQuery(false);
+            }
+          }
           do {
             ResultSet rs = stmnt.getResultSet();
             try {
@@ -1020,6 +1029,9 @@ public class Commands {
               rs.close();
             }
           } while (BeeLine.getMoreResults(stmnt));
+          if (beeLine.isTestMode() && outputFile != null && outputFile.isActiveConverter()) {
+            outputFile.fetchFinished();
+          }
         } else {
           int count = stmnt.getUpdateCount();
           long end = System.currentTimeMillis();

http://git-wip-us.apache.org/repos/asf/hive/blob/44804d82/beeline/src/java/org/apache/hive/beeline/OutputFile.java
----------------------------------------------------------------------
diff --git a/beeline/src/java/org/apache/hive/beeline/OutputFile.java b/beeline/src/java/org/apache/hive/beeline/OutputFile.java
index 1014af3..3d6c335 100644
--- a/beeline/src/java/org/apache/hive/beeline/OutputFile.java
+++ b/beeline/src/java/org/apache/hive/beeline/OutputFile.java
@@ -22,23 +22,83 @@
  */
 package org.apache.hive.beeline;
 
+import com.google.common.annotations.VisibleForTesting;
+
 import java.io.File;
-import java.io.FileWriter;
 import java.io.IOException;
-import java.io.PrintWriter;
+import java.io.PrintStream;
 
 public class OutputFile {
-  final File file;
-  final PrintWriter out;
+  private final PrintStream out;
+  private final String filename;
 
   public OutputFile(String filename) throws IOException {
-    file = new File(filename);
-    out = new PrintWriter(new FileWriter(file));
+    File file = new File(filename);
+    this.filename = file.getAbsolutePath();
+    this.out = new PrintStream(file, "UTF-8");
+  }
+
+  @VisibleForTesting
+  protected PrintStream getOut() {
+    return out;
+  }
+
+  @VisibleForTesting
+  protected String getFilename() {
+    return filename;
+  }
+
+  /**
+   * Constructor used by the decorating classes in tests.
+   * @param out The output stream
+   * @param filename The filename, to use in the toString() method
+   */
+  @VisibleForTesting
+  protected OutputFile(PrintStream out, String filename) {
+    this.out = out;
+    this.filename = filename;
+  }
+
+  /**
+   * Returns true if a FetchConverter is defined for writing the results; this is the case only
+   * in test mode, otherwise it returns false.
+   * @return True if a FetchConverter is active
+   */
+  boolean isActiveConverter() {
+    return false;
+  }
+
+  /**
+   * Indicates that result fetching is started, and the converter should be activated. The
+   * Converter starts to collect the data when the fetch is started, and prints out the
+   * converted data when the fetch is finished. The converter collects data only if both
+   * fetchStarted has been called and foundQuery is true.
+   */
+  void fetchStarted() {
+    // no-op for default output file
+  }
+
+  /**
+   * Indicates that the following data will be a query result, and the converter should be
+   * activated. The converter collects the data only if both fetchStarted has been called and foundQuery is true.
+   * @param foundQuery The following data will be a query result (true) or not (false)
+   */
+  void foundQuery(boolean foundQuery) {
+    // no-op for default output file
+  }
+
+  /**
+   * Indicates that the previously collected data should be converted and written. Converter
+   * starts to collect the data when the fetch is started, and prints out the converted data when
+   * the fetch is finished.
+   */
+  void fetchFinished() {
+    // no-op for default output file
   }
 
   @Override
   public String toString() {
-    return file.getAbsolutePath();
+    return filename;
   }
 
   public void addLine(String command) {

http://git-wip-us.apache.org/repos/asf/hive/blob/44804d82/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CoreBeeLineDriver.java
----------------------------------------------------------------------
diff --git a/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CoreBeeLineDriver.java b/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CoreBeeLineDriver.java
index 8c7057c..2be83ca 100644
--- a/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CoreBeeLineDriver.java
+++ b/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CoreBeeLineDriver.java
@@ -23,6 +23,7 @@ import com.google.common.base.Strings;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.ql.QTestProcessExecResult;
 import org.apache.hadoop.hive.ql.hooks.PreExecutePrinter;
+import org.apache.hive.beeline.ConvertedOutputFile.Converter;
 import org.apache.hive.beeline.QFile;
 import org.apache.hive.beeline.QFile.QFileBuilder;
 import org.apache.hive.beeline.QFileBeeLineClient;
@@ -118,7 +119,8 @@ public class CoreBeeLineDriver extends CliAdapter {
             "set test.script.dir=" + testScriptDirectory + ";",
             "!run " + script,
           },
-          log);
+          log,
+          Converter.NONE);
     } catch (Exception e) {
       throw new SQLException("Error running infra script: " + script
           + "\nCheck the following logs for details:\n - " + beeLineOutput + "\n - " + log, e);

http://git-wip-us.apache.org/repos/asf/hive/blob/44804d82/itests/util/src/main/java/org/apache/hive/beeline/ConvertedOutputFile.java
----------------------------------------------------------------------
diff --git a/itests/util/src/main/java/org/apache/hive/beeline/ConvertedOutputFile.java b/itests/util/src/main/java/org/apache/hive/beeline/ConvertedOutputFile.java
new file mode 100644
index 0000000..ffca4ae
--- /dev/null
+++ b/itests/util/src/main/java/org/apache/hive/beeline/ConvertedOutputFile.java
@@ -0,0 +1,94 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hive.beeline;
+
+import org.apache.hadoop.hive.common.io.DigestPrintStream;
+import org.apache.hadoop.hive.common.io.FetchConverter;
+import org.apache.hadoop.hive.common.io.SortAndDigestPrintStream;
+import org.apache.hadoop.hive.common.io.SortPrintStream;
+
+import java.io.PrintStream;
+
+/**
+ * Class for representing an OutputFile, into which the writes are converted by the existing
+ * FetchConverters.
+ */
+public class ConvertedOutputFile extends OutputFile {
+  private final boolean isActiveFetchConverter;
+
+  public ConvertedOutputFile(OutputFile inner, Converter converter) throws Exception {
+    super(converter.getConvertedPrintStream(inner.getOut()), inner.getFilename());
+    isActiveFetchConverter = (getOut() instanceof FetchConverter);
+  }
+
+  @Override
+  boolean isActiveConverter() {
+    return isActiveFetchConverter;
+  }
+
+  @Override
+  void fetchStarted() {
+    if (isActiveFetchConverter) {
+      ((FetchConverter) getOut()).fetchStarted();
+    }
+  }
+
+  @Override
+  void foundQuery(boolean foundQuery) {
+    if (isActiveFetchConverter) {
+      ((FetchConverter) getOut()).foundQuery(foundQuery);
+    }
+  }
+
+  @Override
+  void fetchFinished() {
+    if (isActiveFetchConverter) {
+      ((FetchConverter) getOut()).fetchFinished();
+    }
+  }
+
+  /**
+   * The supported type of converters pointing to a specific FetchConverter class, and the method
+   * which provides the actual converted stream.
+   */
+  public enum Converter {
+    SORT_QUERY_RESULTS {
+      public PrintStream getConvertedPrintStream(PrintStream inner) throws Exception {
+        return new SortPrintStream(inner, "UTF-8");
+      }
+    },
+    HASH_QUERY_RESULTS {
+      public PrintStream getConvertedPrintStream(PrintStream inner) throws Exception {
+        return new DigestPrintStream(inner, "UTF-8");
+      }
+    },
+    SORT_AND_HASH_QUERY_RESULTS {
+      public PrintStream getConvertedPrintStream(PrintStream inner) throws Exception {
+        return new SortAndDigestPrintStream(inner, "UTF-8");
+      }
+    },
+    NONE {
+      public PrintStream getConvertedPrintStream(PrintStream inner) throws Exception {
+        return inner;
+      }
+    };
+
+    public abstract PrintStream getConvertedPrintStream(PrintStream inner) throws Exception;
+  }
+}

http://git-wip-us.apache.org/repos/asf/hive/blob/44804d82/itests/util/src/main/java/org/apache/hive/beeline/QFile.java
----------------------------------------------------------------------
diff --git a/itests/util/src/main/java/org/apache/hive/beeline/QFile.java b/itests/util/src/main/java/org/apache/hive/beeline/QFile.java
index 0bde529..3d9ca99 100644
--- a/itests/util/src/main/java/org/apache/hive/beeline/QFile.java
+++ b/itests/util/src/main/java/org/apache/hive/beeline/QFile.java
@@ -23,6 +23,7 @@ import org.apache.hadoop.hive.ql.QTestProcessExecResult;
 import org.apache.hadoop.hive.ql.QTestUtil;
 import org.apache.hadoop.util.Shell;
 import org.apache.hive.common.util.StreamPrinter;
+import org.apache.hive.beeline.ConvertedOutputFile.Converter;
 
 import java.io.ByteArrayOutputStream;
 import java.io.File;
@@ -72,6 +73,7 @@ public final class QFile {
   private static RegexFilterSet staticFilterSet = getStaticFilterSet();
   private RegexFilterSet specificFilterSet;
   private boolean rewriteSourceTables;
+  private Converter converter;
 
   private QFile() {}
 
@@ -107,6 +109,10 @@ public final class QFile {
     return afterExecuteLogFile;
   }
 
+  public Converter getConverter() {
+    return converter;
+  }
+
   public String getDebugHint() {
     return String.format(DEBUG_HINT, inputFile, rawOutputFile, outputFile, expectedOutputFile,
         logFile, beforeExecuteLogFile, afterExecuteLogFile,
@@ -327,6 +333,17 @@ public final class QFile {
           .addFilter("(PREHOOK|POSTHOOK): (Output|Input): " + name + "@", "$1: $2: default@")
           .addFilter("name(:?) " + name + "\\.(.*)\n", "name$1 default.$2\n")
           .addFilter("/" + name + ".db/", "/");
+      result.converter = Converter.NONE;
+      String input = FileUtils.readFileToString(result.inputFile, "UTF-8");
+      if (input.contains("-- SORT_QUERY_RESULTS")) {
+        result.converter = Converter.SORT_QUERY_RESULTS;
+      }
+      if (input.contains("-- HASH_QUERY_RESULTS")) {
+        result.converter = Converter.HASH_QUERY_RESULTS;
+      }
+      if (input.contains("-- SORT_AND_HASH_QUERY_RESULTS")) {
+        result.converter = Converter.SORT_AND_HASH_QUERY_RESULTS;
+      }
       return result;
     }
   }

http://git-wip-us.apache.org/repos/asf/hive/blob/44804d82/itests/util/src/main/java/org/apache/hive/beeline/QFileBeeLineClient.java
----------------------------------------------------------------------
diff --git a/itests/util/src/main/java/org/apache/hive/beeline/QFileBeeLineClient.java b/itests/util/src/main/java/org/apache/hive/beeline/QFileBeeLineClient.java
index f1b53f7..7c50e18 100644
--- a/itests/util/src/main/java/org/apache/hive/beeline/QFileBeeLineClient.java
+++ b/itests/util/src/main/java/org/apache/hive/beeline/QFileBeeLineClient.java
@@ -18,6 +18,8 @@
 
 package org.apache.hive.beeline;
 
+import org.apache.hive.beeline.ConvertedOutputFile.Converter;
+
 import java.io.File;
 import java.io.IOException;
 import java.io.PrintStream;
@@ -50,11 +52,13 @@ public class QFileBeeLineClient implements AutoCloseable {
         });
   }
 
-  public void execute(String[] commands, File resultFile) throws SQLException {
+  public void execute(String[] commands, File resultFile, Converter converter)
+      throws Exception {
     beeLine.runCommands(
         new String[] {
           "!record " + resultFile.getAbsolutePath()
         });
+    beeLine.setRecordOutputFile(new ConvertedOutputFile(beeLine.getRecordOutputFile(), converter));
 
     int lastSuccessfulCommand = beeLine.runCommands(commands);
     if (commands.length != lastSuccessfulCommand) {
@@ -64,7 +68,7 @@ public class QFileBeeLineClient implements AutoCloseable {
     beeLine.runCommands(new String[] {"!record"});
   }
 
-  private void beforeExecute(QFile qFile) throws SQLException {
+  private void beforeExecute(QFile qFile) throws Exception {
     execute(
         new String[] {
           "!set outputformat tsv2",
@@ -79,11 +83,12 @@ public class QFileBeeLineClient implements AutoCloseable {
           "set hive.in.test.short.logs=true;",
           "set hive.in.test.remove.logs=false;",
         },
-        qFile.getBeforeExecuteLogFile());
+        qFile.getBeforeExecuteLogFile(),
+        Converter.NONE);
     beeLine.setIsTestMode(true);
   }
 
-  private void afterExecute(QFile qFile) throws SQLException {
+  private void afterExecute(QFile qFile) throws Exception {
     beeLine.setIsTestMode(false);
     execute(
         new String[] {
@@ -95,13 +100,14 @@ public class QFileBeeLineClient implements AutoCloseable {
           "USE default;",
           "DROP DATABASE IF EXISTS `" + qFile.getName() + "` CASCADE;",
         },
-        qFile.getAfterExecuteLogFile());
+        qFile.getAfterExecuteLogFile(),
+        Converter.NONE);
   }
 
-  public void execute(QFile qFile) throws SQLException, IOException {
+  public void execute(QFile qFile) throws Exception {
     beforeExecute(qFile);
     String[] commands = beeLine.getCommands(qFile.getInputFile());
-    execute(qFile.filterCommands(commands), qFile.getRawOutputFile());
+    execute(qFile.filterCommands(commands), qFile.getRawOutputFile(), qFile.getConverter());
     afterExecute(qFile);
   }
 

http://git-wip-us.apache.org/repos/asf/hive/blob/44804d82/ql/src/test/results/clientpositive/beeline/smb_mapjoin_1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/beeline/smb_mapjoin_1.q.out b/ql/src/test/results/clientpositive/beeline/smb_mapjoin_1.q.out
index c943b03..40df1c3 100644
--- a/ql/src/test/results/clientpositive/beeline/smb_mapjoin_1.q.out
+++ b/ql/src/test/results/clientpositive/beeline/smb_mapjoin_1.q.out
@@ -150,10 +150,10 @@ POSTHOOK: Input: default@smb_bucket_1
 POSTHOOK: Input: default@smb_bucket_2
 #### A masked pattern was here ####
 1	val_1	NULL	NULL
+10	val_10	NULL	NULL
 3	val_3	NULL	NULL
 4	val_4	NULL	NULL
 5	val_5	NULL	NULL
-10	val_10	NULL	NULL
 PREHOOK: query: explain
 select /*+mapjoin(a)*/ * from smb_bucket_1 a right outer join smb_bucket_2 b on a.key = b.key
 PREHOOK: type: QUERY
@@ -259,10 +259,10 @@ POSTHOOK: Input: default@smb_bucket_1
 POSTHOOK: Input: default@smb_bucket_2
 #### A masked pattern was here ####
 1	val_1	NULL	NULL
+10	val_10	NULL	NULL
 3	val_3	NULL	NULL
 4	val_4	NULL	NULL
 5	val_5	NULL	NULL
-10	val_10	NULL	NULL
 NULL	NULL	20	val_20
 NULL	NULL	23	val_23
 NULL	NULL	25	val_25
@@ -371,10 +371,10 @@ POSTHOOK: Input: default@smb_bucket_1
 POSTHOOK: Input: default@smb_bucket_2
 #### A masked pattern was here ####
 1	val_1	NULL	NULL
+10	val_10	NULL	NULL
 3	val_3	NULL	NULL
 4	val_4	NULL	NULL
 5	val_5	NULL	NULL
-10	val_10	NULL	NULL
 PREHOOK: query: explain
 select /*+mapjoin(b)*/ * from smb_bucket_1 a right outer join smb_bucket_2 b on a.key = b.key
 PREHOOK: type: QUERY
@@ -480,10 +480,10 @@ POSTHOOK: Input: default@smb_bucket_1
 POSTHOOK: Input: default@smb_bucket_2
 #### A masked pattern was here ####
 1	val_1	NULL	NULL
+10	val_10	NULL	NULL
 3	val_3	NULL	NULL
 4	val_4	NULL	NULL
 5	val_5	NULL	NULL
-10	val_10	NULL	NULL
 NULL	NULL	20	val_20
 NULL	NULL	23	val_23
 NULL	NULL	25	val_25

http://git-wip-us.apache.org/repos/asf/hive/blob/44804d82/ql/src/test/results/clientpositive/beeline/smb_mapjoin_2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/beeline/smb_mapjoin_2.q.out b/ql/src/test/results/clientpositive/beeline/smb_mapjoin_2.q.out
index 1ea6553..7840905 100644
--- a/ql/src/test/results/clientpositive/beeline/smb_mapjoin_2.q.out
+++ b/ql/src/test/results/clientpositive/beeline/smb_mapjoin_2.q.out
@@ -99,8 +99,8 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@smb_bucket_1
 POSTHOOK: Input: default@smb_bucket_3
 #### A masked pattern was here ####
-4	val_4	4	val_4
 10	val_10	10	val_10
+4	val_4	4	val_4
 PREHOOK: query: explain
 select /*+mapjoin(a)*/ * from smb_bucket_1 a left outer join smb_bucket_3 b on a.key = b.key
 PREHOOK: type: QUERY
@@ -152,10 +152,10 @@ POSTHOOK: Input: default@smb_bucket_1
 POSTHOOK: Input: default@smb_bucket_3
 #### A masked pattern was here ####
 1	val_1	NULL	NULL
+10	val_10	10	val_10
 3	val_3	NULL	NULL
 4	val_4	4	val_4
 5	val_5	NULL	NULL
-10	val_10	10	val_10
 PREHOOK: query: explain
 select /*+mapjoin(a)*/ * from smb_bucket_1 a right outer join smb_bucket_3 b on a.key = b.key
 PREHOOK: type: QUERY
@@ -206,8 +206,8 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@smb_bucket_1
 POSTHOOK: Input: default@smb_bucket_3
 #### A masked pattern was here ####
-4	val_4	4	val_4
 10	val_10	10	val_10
+4	val_4	4	val_4
 NULL	NULL	17	val_17
 NULL	NULL	19	val_19
 NULL	NULL	20	val_20
@@ -263,10 +263,10 @@ POSTHOOK: Input: default@smb_bucket_1
 POSTHOOK: Input: default@smb_bucket_3
 #### A masked pattern was here ####
 1	val_1	NULL	NULL
+10	val_10	10	val_10
 3	val_3	NULL	NULL
 4	val_4	4	val_4
 5	val_5	NULL	NULL
-10	val_10	10	val_10
 NULL	NULL	17	val_17
 NULL	NULL	19	val_19
 NULL	NULL	20	val_20
@@ -324,8 +324,8 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@smb_bucket_1
 POSTHOOK: Input: default@smb_bucket_3
 #### A masked pattern was here ####
-4	val_4	4	val_4
 10	val_10	10	val_10
+4	val_4	4	val_4
 PREHOOK: query: explain
 select /*+mapjoin(b)*/ * from smb_bucket_1 a left outer join smb_bucket_3 b on a.key = b.key
 PREHOOK: type: QUERY
@@ -377,10 +377,10 @@ POSTHOOK: Input: default@smb_bucket_1
 POSTHOOK: Input: default@smb_bucket_3
 #### A masked pattern was here ####
 1	val_1	NULL	NULL
+10	val_10	10	val_10
 3	val_3	NULL	NULL
 4	val_4	4	val_4
 5	val_5	NULL	NULL
-10	val_10	10	val_10
 PREHOOK: query: explain
 select /*+mapjoin(b)*/ * from smb_bucket_1 a right outer join smb_bucket_3 b on a.key = b.key
 PREHOOK: type: QUERY
@@ -431,8 +431,8 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@smb_bucket_1
 POSTHOOK: Input: default@smb_bucket_3
 #### A masked pattern was here ####
-4	val_4	4	val_4
 10	val_10	10	val_10
+4	val_4	4	val_4
 NULL	NULL	17	val_17
 NULL	NULL	19	val_19
 NULL	NULL	20	val_20
@@ -488,10 +488,10 @@ POSTHOOK: Input: default@smb_bucket_1
 POSTHOOK: Input: default@smb_bucket_3
 #### A masked pattern was here ####
 1	val_1	NULL	NULL
+10	val_10	10	val_10
 3	val_3	NULL	NULL
 4	val_4	4	val_4
 5	val_5	NULL	NULL
-10	val_10	10	val_10
 NULL	NULL	17	val_17
 NULL	NULL	19	val_19
 NULL	NULL	20	val_20

http://git-wip-us.apache.org/repos/asf/hive/blob/44804d82/ql/src/test/results/clientpositive/beeline/smb_mapjoin_3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/beeline/smb_mapjoin_3.q.out b/ql/src/test/results/clientpositive/beeline/smb_mapjoin_3.q.out
index f639ba4..cda600b 100644
--- a/ql/src/test/results/clientpositive/beeline/smb_mapjoin_3.q.out
+++ b/ql/src/test/results/clientpositive/beeline/smb_mapjoin_3.q.out
@@ -205,12 +205,12 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@smb_bucket_2
 POSTHOOK: Input: default@smb_bucket_3
 #### A masked pattern was here ####
-NULL	NULL	4	val_4
+20	val_20	20	val_20
+23	val_23	23	val_23
 NULL	NULL	10	val_10
 NULL	NULL	17	val_17
 NULL	NULL	19	val_19
-20	val_20	20	val_20
-23	val_23	23	val_23
+NULL	NULL	4	val_4
 PREHOOK: query: explain
 select /*+mapjoin(a)*/ * from smb_bucket_2 a full outer join smb_bucket_3 b on a.key = b.key
 PREHOOK: type: QUERY
@@ -261,14 +261,14 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@smb_bucket_2
 POSTHOOK: Input: default@smb_bucket_3
 #### A masked pattern was here ####
-NULL	NULL	4	val_4
-NULL	NULL	10	val_10
-NULL	NULL	17	val_17
-NULL	NULL	19	val_19
 20	val_20	20	val_20
 23	val_23	23	val_23
 25	val_25	NULL	NULL
 30	val_30	NULL	NULL
+NULL	NULL	10	val_10
+NULL	NULL	17	val_17
+NULL	NULL	19	val_19
+NULL	NULL	4	val_4
 PREHOOK: query: explain
 select /*+mapjoin(b)*/ * from smb_bucket_2 a join smb_bucket_3 b on a.key = b.key
 PREHOOK: type: QUERY
@@ -428,12 +428,12 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@smb_bucket_2
 POSTHOOK: Input: default@smb_bucket_3
 #### A masked pattern was here ####
-NULL	NULL	4	val_4
+20	val_20	20	val_20
+23	val_23	23	val_23
 NULL	NULL	10	val_10
 NULL	NULL	17	val_17
 NULL	NULL	19	val_19
-20	val_20	20	val_20
-23	val_23	23	val_23
+NULL	NULL	4	val_4
 PREHOOK: query: explain
 select /*+mapjoin(b)*/ * from smb_bucket_2 a full outer join smb_bucket_3 b on a.key = b.key
 PREHOOK: type: QUERY
@@ -484,11 +484,11 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@smb_bucket_2
 POSTHOOK: Input: default@smb_bucket_3
 #### A masked pattern was here ####
-NULL	NULL	4	val_4
-NULL	NULL	10	val_10
-NULL	NULL	17	val_17
-NULL	NULL	19	val_19
 20	val_20	20	val_20
 23	val_23	23	val_23
 25	val_25	NULL	NULL
 30	val_30	NULL	NULL
+NULL	NULL	10	val_10
+NULL	NULL	17	val_17
+NULL	NULL	19	val_19
+NULL	NULL	4	val_4


[36/50] [abbrv] hive git commit: HIVE-16598 : LlapServiceDriver - create directories and warn of errors (Sergey Shelukhin, reviewed by Prasanth Jayachandran)

Posted by we...@apache.org.
HIVE-16598 : LlapServiceDriver - create directories and warn of errors (Sergey Shelukhin, reviewed by Prasanth Jayachandran)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/e4856ca0
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/e4856ca0
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/e4856ca0

Branch: refs/heads/hive-14535
Commit: e4856ca031ce4776ea8738824e57b213394a7735
Parents: 57beac4
Author: sergey <se...@apache.org>
Authored: Mon May 8 12:34:02 2017 -0700
Committer: sergey <se...@apache.org>
Committed: Mon May 8 12:34:02 2017 -0700

----------------------------------------------------------------------
 .../apache/hadoop/hive/llap/cli/LlapServiceDriver.java    | 10 +++++++++-
 1 file changed, 9 insertions(+), 1 deletion(-)
----------------------------------------------------------------------
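
The change is small; for completeness, the same warn-and-continue idea as a tiny helper, since
FileSystem.mkdirs() reports some failures by returning false rather than throwing. This is only a
sketch of the pattern, not the committed code, which inlines the three checks shown below.

import java.io.IOException;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public final class DirUtil {
  private static final Logger LOG = LoggerFactory.getLogger(DirUtil.class);

  private DirUtil() {}

  public static void mkdirsOrWarn(FileSystem fs, Path dir) throws IOException {
    // mkdirs() can return false without throwing; warn and keep going, as the driver does.
    if (!fs.mkdirs(dir)) {
      LOG.warn("mkdirs for " + dir + " returned false");
    }
  }
}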


http://git-wip-us.apache.org/repos/asf/hive/blob/e4856ca0/llap-server/src/java/org/apache/hadoop/hive/llap/cli/LlapServiceDriver.java
----------------------------------------------------------------------
diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/cli/LlapServiceDriver.java b/llap-server/src/java/org/apache/hadoop/hive/llap/cli/LlapServiceDriver.java
index a662c75..6bf9550 100644
--- a/llap-server/src/java/org/apache/hadoop/hive/llap/cli/LlapServiceDriver.java
+++ b/llap-server/src/java/org/apache/hadoop/hive/llap/cli/LlapServiceDriver.java
@@ -349,7 +349,15 @@ public class LlapServiceDriver {
       final Path tezDir = new Path(libDir, "tez");
       final Path udfDir = new Path(libDir, "udfs");
       final Path confPath = new Path(tmpDir, "conf");
-      lfs.mkdirs(confPath);
+      if (!lfs.mkdirs(confPath)) {
+        LOG.warn("mkdirs for " + confPath + " returned false");
+      }
+      if (!lfs.mkdirs(tezDir)) {
+        LOG.warn("mkdirs for " + tezDir + " returned false");
+      }
+      if (!lfs.mkdirs(udfDir)) {
+        LOG.warn("mkdirs for " + udfDir + " returned false");
+      }
 
       NamedCallable<Void> downloadTez = new NamedCallable<Void>("downloadTez") {
         @Override


[17/50] [abbrv] hive git commit: HIVE-16578: Semijoin Hints should use column name, if provided for partition key check (Deepak Jaiswal, reviewed by Jason Dere)

Posted by we...@apache.org.
HIVE-16578: Semijoin Hints should use column name, if provided for partition key check (Deepak Jaiswal, reviewed by Jason Dere)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/699d6ce3
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/699d6ce3
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/699d6ce3

Branch: refs/heads/hive-14535
Commit: 699d6ce36deb71ddc04a9ef7bec434ef4d2409b1
Parents: d09f3f8
Author: Jason Dere <jd...@hortonworks.com>
Authored: Fri May 5 16:35:32 2017 -0700
Committer: Jason Dere <jd...@hortonworks.com>
Committed: Fri May 5 16:35:32 2017 -0700

----------------------------------------------------------------------
 .../DynamicPartitionPruningOptimization.java    | 122 +++---
 .../test/queries/clientpositive/semijoin_hint.q |  17 +-
 .../llap/dynamic_semijoin_reduction.q.out       |  28 +-
 .../llap/dynamic_semijoin_reduction_2.q.out     |   4 +-
 .../llap/dynamic_semijoin_reduction_3.q.out     |   2 +-
 .../llap/dynamic_semijoin_user_level.q.out      |  16 +-
 .../clientpositive/llap/semijoin_hint.q.out     | 388 ++++++++++++++++---
 7 files changed, 444 insertions(+), 133 deletions(-)
----------------------------------------------------------------------
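
A hedged, much-simplified sketch of the check this patch adds: the semijoin branch key is first
resolved from its internal name (e.g. "_col0") back to the user-visible column, via the
SelectOperator's column map when one sits between the ReduceSink and the TableScan, and a
semi() hint that names a different column is dropped rather than applied to the wrong key.
Plain strings and maps stand in for the real ExprNodeDesc and operator classes here.

import java.util.Map;

public final class SemiJoinHintColumnCheck {

  private SemiJoinHintColumnCheck() {}

  // Resolves a ReduceSink key column (e.g. "_col0") to the table column it came from,
  // or returns null if the key cannot be traced back to a plain column reference.
  public static String resolveColumnName(String internalColName, Map<String, String> selectColumnMap) {
    if (selectColumnMap == null) {
      return internalColName;     // no SelectOperator in between: the internal name is the column
    }
    return selectColumnMap.get(internalColName);
  }

  // Returns the hint column to keep: null (drop the hint) when it names a different column.
  public static String effectiveHintColumn(String resolvedColName, String hintedColName) {
    if (hintedColName != null && !hintedColName.equals(resolvedColName)) {
      return null;                // mismatch: ignore the hint and fall back to the un-hinted path
    }
    return hintedColName;
  }
}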


http://git-wip-us.apache.org/repos/asf/hive/blob/699d6ce3/ql/src/java/org/apache/hadoop/hive/ql/optimizer/DynamicPartitionPruningOptimization.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/DynamicPartitionPruningOptimization.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/DynamicPartitionPruningOptimization.java
index b8c0102..d4ca78c 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/DynamicPartitionPruningOptimization.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/DynamicPartitionPruningOptimization.java
@@ -220,23 +220,34 @@ public class DynamicPartitionPruningOptimization implements NodeProcessor {
             }
             String tableAlias = (op == null ? "" : ((TableScanOperator) op).getConf().getAlias());
 
-            keyBaseAlias = ctx.generator.getOperatorId() + "_" + tableAlias + "_" + column;
-
-            Map<String, SemiJoinHint> hints = parseContext.getSemiJoinHints();
-            if (hints != null) {
-              // If hints map has no entry that would imply that user enforced
-              // no runtime filtering.
-              if (hints.size() > 0) {
-                SemiJoinHint sjHint = hints.get(tableAlias);
-                semiJoinAttempted = generateSemiJoinOperatorPlan(
-                        ctx, parseContext, ts, keyBaseAlias, sjHint);
-                if (!semiJoinAttempted && sjHint != null) {
-                  throw new SemanticException("The user hint to enforce semijoin failed required conditions");
+            StringBuilder internalColNameBuilder = new StringBuilder();
+            StringBuilder colNameBuilder = new StringBuilder();
+            if (getColumnName(ctx, internalColNameBuilder, colNameBuilder)) {
+              String colName = colNameBuilder.toString();
+              keyBaseAlias = ctx.generator.getOperatorId() + "_" + tableAlias
+                      + "_" + colName;
+              Map<String, SemiJoinHint> hints = parseContext.getSemiJoinHints();
+              if (hints != null) {
+                if (hints.size() > 0) {
+                  SemiJoinHint sjHint = hints.get(tableAlias);
+                  if (sjHint != null && sjHint.getColName() != null &&
+                          !colName.equals(sjHint.getColName())) {
+                    LOG.debug("Removed hint due to column mismatch. Col = " + colName + ", hint column = " + sjHint.getColName());
+                    sjHint = null;
+                  }
+                  semiJoinAttempted = generateSemiJoinOperatorPlan(
+                          ctx, parseContext, ts, keyBaseAlias,
+                          internalColNameBuilder.toString(), colName, sjHint);
+                  if (!semiJoinAttempted && sjHint != null) {
+                    throw new SemanticException("The user hint to enforce semijoin failed required conditions");
+                  }
                 }
+              } else {
+                // fallback to regular logic
+                semiJoinAttempted = generateSemiJoinOperatorPlan(
+                        ctx, parseContext, ts, keyBaseAlias,
+                        internalColNameBuilder.toString(), colName, null);
               }
-            } else {
-              semiJoinAttempted = generateSemiJoinOperatorPlan(
-                      ctx, parseContext, ts, keyBaseAlias, null);
             }
           }
         }
@@ -285,6 +296,34 @@ public class DynamicPartitionPruningOptimization implements NodeProcessor {
     return false;
   }
 
+  // Given a key, find the corresponding column name.
+  private boolean getColumnName(DynamicListContext ctx, StringBuilder internalColName,
+                                StringBuilder colName) {
+    ExprNodeDesc exprNodeDesc = ctx.generator.getConf().getKeyCols().get(ctx.desc.getKeyIndex());
+    ExprNodeColumnDesc colExpr = ExprNodeDescUtils.getColumnExpr(exprNodeDesc);
+
+    if (colExpr == null) {
+      return false;
+    }
+
+    internalColName.append(colExpr.getColumn());
+    Operator<? extends OperatorDesc> parentOfRS = ctx.generator.getParentOperators().get(0);
+    if (!(parentOfRS instanceof SelectOperator)) {
+      colName.append(internalColName.toString());
+      return true;
+    }
+
+    exprNodeDesc = parentOfRS.getColumnExprMap().get(internalColName.toString());
+    colExpr = ExprNodeDescUtils.getColumnExpr(exprNodeDesc);
+
+    if (colExpr == null) {
+      return false;
+    }
+
+    colName.append(ExprNodeDescUtils.extractColName(colExpr));
+    return true;
+  }
+
   private void replaceExprNode(DynamicListContext ctx, FilterDesc desc, ExprNodeDesc node) {
     if (ctx.grandParent == null) {
       desc.setPredicate(node);
@@ -400,7 +439,8 @@ public class DynamicPartitionPruningOptimization implements NodeProcessor {
 
   // Generates plan for min/max when dynamic partition pruning is ruled out.
   private boolean generateSemiJoinOperatorPlan(DynamicListContext ctx, ParseContext parseContext,
-      TableScanOperator ts, String keyBaseAlias, SemiJoinHint sjHint) throws SemanticException {
+      TableScanOperator ts, String keyBaseAlias, String internalColName,
+      String colName, SemiJoinHint sjHint) throws SemanticException {
 
     // If semijoin hint is enforced, make sure hint is provided
     if (parseContext.getConf().getBoolVar(ConfVars.TEZ_DYNAMIC_SEMIJOIN_REDUCTION_HINT_ONLY)
@@ -414,52 +454,20 @@ public class DynamicPartitionPruningOptimization implements NodeProcessor {
     // we need the expr that generated the key of the reduce sink
     ExprNodeDesc key = ctx.generator.getConf().getKeyCols().get(ctx.desc.getKeyIndex());
 
-    String internalColName = null;
-    ExprNodeDesc exprNodeDesc = key;
-    // Find the ExprNodeColumnDesc
-    while (!(exprNodeDesc instanceof ExprNodeColumnDesc) &&
-            (exprNodeDesc.getChildren() != null)) {
-      exprNodeDesc = exprNodeDesc.getChildren().get(0);
+    assert colName != null;
+    // Fetch the TableScan Operator.
+    Operator<?> op = parentOfRS;
+    while (!(op == null || op instanceof TableScanOperator)) {
+      op = op.getParentOperators().get(0);
     }
+    assert op != null;
 
-    if (!(exprNodeDesc instanceof ExprNodeColumnDesc)) {
-      // No column found!
-      // Bail out
+    Table table = ((TableScanOperator) op).getConf().getTableMetadata();
+    if (table.isPartitionKey(colName)) {
+      // The column is partition column, skip the optimization.
       return false;
     }
 
-    internalColName = ((ExprNodeColumnDesc) exprNodeDesc).getColumn();
-    if (parentOfRS instanceof SelectOperator) {
-      // Make sure the semijoin branch is not on partition column.
-      ExprNodeDesc expr = parentOfRS.getColumnExprMap().get(internalColName);
-      while (!(expr instanceof ExprNodeColumnDesc) &&
-              (expr.getChildren() != null)) {
-        expr = expr.getChildren().get(0);
-      }
-
-      if (!(expr instanceof ExprNodeColumnDesc)) {
-        // No column found!
-        // Bail out
-        return false;
-      }
-
-      ExprNodeColumnDesc colExpr = (ExprNodeColumnDesc) expr;
-      String colName = ExprNodeDescUtils.extractColName(colExpr);
-
-      // Fetch the TableScan Operator.
-      Operator<?> op = parentOfRS.getParentOperators().get(0);
-      while (op != null && !(op instanceof TableScanOperator)) {
-        op = op.getParentOperators().get(0);
-      }
-      assert op != null;
-
-      Table table = ((TableScanOperator) op).getConf().getTableMetadata();
-      if (table.isPartitionKey(colName)) {
-        // The column is partition column, skip the optimization.
-        return false;
-      }
-    }
-
     // If hint is provided and only hinted semijoin optimizations should be
     // created, then skip other columns on the table
     if (parseContext.getConf().getBoolVar(ConfVars.TEZ_DYNAMIC_SEMIJOIN_REDUCTION_HINT_ONLY)

http://git-wip-us.apache.org/repos/asf/hive/blob/699d6ce3/ql/src/test/queries/clientpositive/semijoin_hint.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/semijoin_hint.q b/ql/src/test/queries/clientpositive/semijoin_hint.q
index a3cd1d6..5fbc273 100644
--- a/ql/src/test/queries/clientpositive/semijoin_hint.q
+++ b/ql/src/test/queries/clientpositive/semijoin_hint.q
@@ -54,6 +54,12 @@ EXPLAIN select  /*+ semi(i, 3000)*/ count(*) from srcpart_date k join srcpart_sm
 
 explain select /*+ semi(k, str, 5000)*/ count(*) from srcpart_date k join srcpart_small v on (k.str = v.key1);
 
+set hive.tez.dynamic.semijoin.reduction.hint.only=true;
+-- This should NOT create a semijoin as the join is on a different column
+explain select /*+ semi(k, str, 5000)*/ count(*) from srcpart_date k join srcpart_small v on (k.value = v.key1);
+set hive.tez.dynamic.semijoin.reduction.hint.only=false;
+
+
 set hive.cbo.returnpath.hiveop=false;
 
 explain select /*+ semi(k, str, 5000)*/ count(*) from srcpart_date k join srcpart_small s on (k.str = s.key1)
@@ -70,6 +76,12 @@ EXPLAIN select  /*+ semi(i, 3000)*/ count(*) from srcpart_date k join srcpart_sm
 
 explain select /*+ semi(k, str, 5000)*/ count(*) from srcpart_date k join srcpart_small v on (k.str = v.key1);
 
+set hive.tez.dynamic.semijoin.reduction.hint.only=true;
+-- This should NOT create a semijoin as the join is on a different column
+explain select /*+ semi(k, str, 5000)*/ count(*) from srcpart_date k join srcpart_small v on (k.value = v.key1);
+set hive.tez.dynamic.semijoin.reduction.hint.only=false;
+
+
 set hive.cbo.enable=false;
 
 explain select /*+ semi(k, str, 5000)*/ count(*) from srcpart_date k join srcpart_small s on (k.str = s.key1)
@@ -86,4 +98,7 @@ EXPLAIN select  /*+ semi(i, 3000)*/ count(*) from srcpart_date k join srcpart_sm
 
 explain select /*+ semi(k, str, 5000)*/ count(*) from srcpart_date k join srcpart_small v on (k.str = v.key1);
 
-
+set hive.tez.dynamic.semijoin.reduction.hint.only=true;
+-- This should NOT create a semijoin as the join is on a different column
+explain select /*+ semi(k, str, 5000)*/ count(*) from srcpart_date k join srcpart_small v on (k.value = v.key1);
+set hive.tez.dynamic.semijoin.reduction.hint.only=false;

http://git-wip-us.apache.org/repos/asf/hive/blob/699d6ce3/ql/src/test/results/clientpositive/llap/dynamic_semijoin_reduction.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/dynamic_semijoin_reduction.q.out b/ql/src/test/results/clientpositive/llap/dynamic_semijoin_reduction.q.out
index 1d1f86b..e3ffcfa 100644
--- a/ql/src/test/results/clientpositive/llap/dynamic_semijoin_reduction.q.out
+++ b/ql/src/test/results/clientpositive/llap/dynamic_semijoin_reduction.q.out
@@ -288,10 +288,10 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: srcpart_date
-                  filterExpr: (key is not null and (key BETWEEN DynamicValue(RS_7_srcpart_small_key_min) AND DynamicValue(RS_7_srcpart_small_key_max) and in_bloom_filter(key, DynamicValue(RS_7_srcpart_small_key_bloom_filter)))) (type: boolean)
+                  filterExpr: (key is not null and (key BETWEEN DynamicValue(RS_7_srcpart_small_key1_min) AND DynamicValue(RS_7_srcpart_small_key1_max) and in_bloom_filter(key, DynamicValue(RS_7_srcpart_small_key1_bloom_filter)))) (type: boolean)
                   Statistics: Num rows: 2000 Data size: 174000 Basic stats: COMPLETE Column stats: COMPLETE
                   Filter Operator
-                    predicate: (key is not null and (key BETWEEN DynamicValue(RS_7_srcpart_small_key_min) AND DynamicValue(RS_7_srcpart_small_key_max) and in_bloom_filter(key, DynamicValue(RS_7_srcpart_small_key_bloom_filter)))) (type: boolean)
+                    predicate: (key is not null and (key BETWEEN DynamicValue(RS_7_srcpart_small_key1_min) AND DynamicValue(RS_7_srcpart_small_key1_max) and in_bloom_filter(key, DynamicValue(RS_7_srcpart_small_key1_bloom_filter)))) (type: boolean)
                     Statistics: Num rows: 2000 Data size: 174000 Basic stats: COMPLETE Column stats: COMPLETE
                     Select Operator
                       expressions: key (type: string)
@@ -704,10 +704,10 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: alltypesorc_int
-                  filterExpr: (cstring is not null and (cstring BETWEEN DynamicValue(RS_10_srcpart_small_cstring_min) AND DynamicValue(RS_10_srcpart_small_cstring_max) and in_bloom_filter(cstring, DynamicValue(RS_10_srcpart_small_cstring_bloom_filter))) and (cstring BETWEEN DynamicValue(RS_11_srcpart_date_cstring_min) AND DynamicValue(RS_11_srcpart_date_cstring_max) and in_bloom_filter(cstring, DynamicValue(RS_11_srcpart_date_cstring_bloom_filter)))) (type: boolean)
+                  filterExpr: (cstring is not null and (cstring BETWEEN DynamicValue(RS_10_srcpart_small_key1_min) AND DynamicValue(RS_10_srcpart_small_key1_max) and in_bloom_filter(cstring, DynamicValue(RS_10_srcpart_small_key1_bloom_filter))) and (cstring BETWEEN DynamicValue(RS_11_srcpart_date_key_min) AND DynamicValue(RS_11_srcpart_date_key_max) and in_bloom_filter(cstring, DynamicValue(RS_11_srcpart_date_key_bloom_filter)))) (type: boolean)
                   Statistics: Num rows: 12288 Data size: 862450 Basic stats: COMPLETE Column stats: COMPLETE
                   Filter Operator
-                    predicate: (cstring is not null and (cstring BETWEEN DynamicValue(RS_10_srcpart_small_cstring_min) AND DynamicValue(RS_10_srcpart_small_cstring_max) and in_bloom_filter(cstring, DynamicValue(RS_10_srcpart_small_cstring_bloom_filter))) and (cstring BETWEEN DynamicValue(RS_11_srcpart_date_cstring_min) AND DynamicValue(RS_11_srcpart_date_cstring_max) and in_bloom_filter(cstring, DynamicValue(RS_11_srcpart_date_cstring_bloom_filter)))) (type: boolean)
+                    predicate: (cstring is not null and (cstring BETWEEN DynamicValue(RS_10_srcpart_small_key1_min) AND DynamicValue(RS_10_srcpart_small_key1_max) and in_bloom_filter(cstring, DynamicValue(RS_10_srcpart_small_key1_bloom_filter))) and (cstring BETWEEN DynamicValue(RS_11_srcpart_date_key_min) AND DynamicValue(RS_11_srcpart_date_key_max) and in_bloom_filter(cstring, DynamicValue(RS_11_srcpart_date_key_bloom_filter)))) (type: boolean)
                     Statistics: Num rows: 9174 Data size: 643900 Basic stats: COMPLETE Column stats: COMPLETE
                     Select Operator
                       expressions: cstring (type: string)
@@ -757,10 +757,10 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: srcpart_date
-                  filterExpr: (key is not null and (key BETWEEN DynamicValue(RS_10_srcpart_small_key_min) AND DynamicValue(RS_10_srcpart_small_key_max) and in_bloom_filter(key, DynamicValue(RS_10_srcpart_small_key_bloom_filter)))) (type: boolean)
+                  filterExpr: (key is not null and (key BETWEEN DynamicValue(RS_10_srcpart_small_key1_min) AND DynamicValue(RS_10_srcpart_small_key1_max) and in_bloom_filter(key, DynamicValue(RS_10_srcpart_small_key1_bloom_filter)))) (type: boolean)
                   Statistics: Num rows: 2000 Data size: 174000 Basic stats: COMPLETE Column stats: COMPLETE
                   Filter Operator
-                    predicate: (key is not null and (key BETWEEN DynamicValue(RS_10_srcpart_small_key_min) AND DynamicValue(RS_10_srcpart_small_key_max) and in_bloom_filter(key, DynamicValue(RS_10_srcpart_small_key_bloom_filter)))) (type: boolean)
+                    predicate: (key is not null and (key BETWEEN DynamicValue(RS_10_srcpart_small_key1_min) AND DynamicValue(RS_10_srcpart_small_key1_max) and in_bloom_filter(key, DynamicValue(RS_10_srcpart_small_key1_bloom_filter)))) (type: boolean)
                     Statistics: Num rows: 2000 Data size: 174000 Basic stats: COMPLETE Column stats: COMPLETE
                     Select Operator
                       expressions: key (type: string)
@@ -1019,10 +1019,10 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: srcpart_date
-                  filterExpr: (key is not null and value is not null and (key BETWEEN DynamicValue(RS_7_srcpart_small_key_min) AND DynamicValue(RS_7_srcpart_small_key_max) and in_bloom_filter(key, DynamicValue(RS_7_srcpart_small_key_bloom_filter))) and (value BETWEEN DynamicValue(RS_7_srcpart_small_value_min) AND DynamicValue(RS_7_srcpart_small_value_max) and in_bloom_filter(value, DynamicValue(RS_7_srcpart_small_value_bloom_filter)))) (type: boolean)
+                  filterExpr: (key is not null and value is not null and (key BETWEEN DynamicValue(RS_7_srcpart_small_key1_min) AND DynamicValue(RS_7_srcpart_small_key1_max) and in_bloom_filter(key, DynamicValue(RS_7_srcpart_small_key1_bloom_filter))) and (value BETWEEN DynamicValue(RS_7_srcpart_small_value1_min) AND DynamicValue(RS_7_srcpart_small_value1_max) and in_bloom_filter(value, DynamicValue(RS_7_srcpart_small_value1_bloom_filter)))) (type: boolean)
                   Statistics: Num rows: 2000 Data size: 356000 Basic stats: COMPLETE Column stats: COMPLETE
                   Filter Operator
-                    predicate: (key is not null and value is not null and (key BETWEEN DynamicValue(RS_7_srcpart_small_key_min) AND DynamicValue(RS_7_srcpart_small_key_max) and in_bloom_filter(key, DynamicValue(RS_7_srcpart_small_key_bloom_filter))) and (value BETWEEN DynamicValue(RS_7_srcpart_small_value_min) AND DynamicValue(RS_7_srcpart_small_value_max) and in_bloom_filter(value, DynamicValue(RS_7_srcpart_small_value_bloom_filter)))) (type: boolean)
+                    predicate: (key is not null and value is not null and (key BETWEEN DynamicValue(RS_7_srcpart_small_key1_min) AND DynamicValue(RS_7_srcpart_small_key1_max) and in_bloom_filter(key, DynamicValue(RS_7_srcpart_small_key1_bloom_filter))) and (value BETWEEN DynamicValue(RS_7_srcpart_small_value1_min) AND DynamicValue(RS_7_srcpart_small_value1_max) and in_bloom_filter(value, DynamicValue(RS_7_srcpart_small_value1_bloom_filter)))) (type: boolean)
                     Statistics: Num rows: 2000 Data size: 356000 Basic stats: COMPLETE Column stats: COMPLETE
                     Select Operator
                       expressions: key (type: string), value (type: string)
@@ -1348,10 +1348,10 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: srcpart_date
-                  filterExpr: (key is not null and value is not null and (key BETWEEN DynamicValue(RS_10_srcpart_small_key_min) AND DynamicValue(RS_10_srcpart_small_key_max) and in_bloom_filter(key, DynamicValue(RS_10_srcpart_small_key_bloom_filter)))) (type: boolean)
+                  filterExpr: (key is not null and value is not null and (key BETWEEN DynamicValue(RS_10_srcpart_small_key1_min) AND DynamicValue(RS_10_srcpart_small_key1_max) and in_bloom_filter(key, DynamicValue(RS_10_srcpart_small_key1_bloom_filter)))) (type: boolean)
                   Statistics: Num rows: 2000 Data size: 356000 Basic stats: COMPLETE Column stats: COMPLETE
                   Filter Operator
-                    predicate: (key is not null and value is not null and (key BETWEEN DynamicValue(RS_10_srcpart_small_key_min) AND DynamicValue(RS_10_srcpart_small_key_max) and in_bloom_filter(key, DynamicValue(RS_10_srcpart_small_key_bloom_filter)))) (type: boolean)
+                    predicate: (key is not null and value is not null and (key BETWEEN DynamicValue(RS_10_srcpart_small_key1_min) AND DynamicValue(RS_10_srcpart_small_key1_max) and in_bloom_filter(key, DynamicValue(RS_10_srcpart_small_key1_bloom_filter)))) (type: boolean)
                     Statistics: Num rows: 2000 Data size: 356000 Basic stats: COMPLETE Column stats: COMPLETE
                     Select Operator
                       expressions: key (type: string), value (type: string)
@@ -1402,10 +1402,10 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: alltypesorc_int
-                  filterExpr: (cstring is not null and (cstring BETWEEN DynamicValue(RS_12_srcpart_date_cstring_min) AND DynamicValue(RS_12_srcpart_date_cstring_max) and in_bloom_filter(cstring, DynamicValue(RS_12_srcpart_date_cstring_bloom_filter)))) (type: boolean)
+                  filterExpr: (cstring is not null and (cstring BETWEEN DynamicValue(RS_12_srcpart_date__col1_min) AND DynamicValue(RS_12_srcpart_date__col1_max) and in_bloom_filter(cstring, DynamicValue(RS_12_srcpart_date__col1_bloom_filter)))) (type: boolean)
                   Statistics: Num rows: 12288 Data size: 862450 Basic stats: COMPLETE Column stats: COMPLETE
                   Filter Operator
-                    predicate: (cstring is not null and (cstring BETWEEN DynamicValue(RS_12_srcpart_date_cstring_min) AND DynamicValue(RS_12_srcpart_date_cstring_max) and in_bloom_filter(cstring, DynamicValue(RS_12_srcpart_date_cstring_bloom_filter)))) (type: boolean)
+                    predicate: (cstring is not null and (cstring BETWEEN DynamicValue(RS_12_srcpart_date__col1_min) AND DynamicValue(RS_12_srcpart_date__col1_max) and in_bloom_filter(cstring, DynamicValue(RS_12_srcpart_date__col1_bloom_filter)))) (type: boolean)
                     Statistics: Num rows: 9174 Data size: 643900 Basic stats: COMPLETE Column stats: COMPLETE
                     Select Operator
                       expressions: cstring (type: string)
@@ -1556,12 +1556,12 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: srcpart_date
-                  filterExpr: (key is not null and (key BETWEEN DynamicValue(RS_7_srcpart_small_key_min) AND DynamicValue(RS_7_srcpart_small_key_max) and in_bloom_filter(key, DynamicValue(RS_7_srcpart_small_key_bloom_filter)))) (type: boolean)
+                  filterExpr: (key is not null and (key BETWEEN DynamicValue(RS_7_srcpart_small_key1_min) AND DynamicValue(RS_7_srcpart_small_key1_max) and in_bloom_filter(key, DynamicValue(RS_7_srcpart_small_key1_bloom_filter)))) (type: boolean)
                   Statistics: Num rows: 2000 Data size: 174000 Basic stats: COMPLETE Column stats: COMPLETE
                   GatherStats: false
                   Filter Operator
                     isSamplingPred: false
-                    predicate: (key is not null and (key BETWEEN DynamicValue(RS_7_srcpart_small_key_min) AND DynamicValue(RS_7_srcpart_small_key_max) and in_bloom_filter(key, DynamicValue(RS_7_srcpart_small_key_bloom_filter)))) (type: boolean)
+                    predicate: (key is not null and (key BETWEEN DynamicValue(RS_7_srcpart_small_key1_min) AND DynamicValue(RS_7_srcpart_small_key1_max) and in_bloom_filter(key, DynamicValue(RS_7_srcpart_small_key1_bloom_filter)))) (type: boolean)
                     Statistics: Num rows: 2000 Data size: 174000 Basic stats: COMPLETE Column stats: COMPLETE
                     Select Operator
                       expressions: key (type: string)

http://git-wip-us.apache.org/repos/asf/hive/blob/699d6ce3/ql/src/test/results/clientpositive/llap/dynamic_semijoin_reduction_2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/dynamic_semijoin_reduction_2.q.out b/ql/src/test/results/clientpositive/llap/dynamic_semijoin_reduction_2.q.out
index a5fdd90..cb69251 100644
--- a/ql/src/test/results/clientpositive/llap/dynamic_semijoin_reduction_2.q.out
+++ b/ql/src/test/results/clientpositive/llap/dynamic_semijoin_reduction_2.q.out
@@ -105,10 +105,10 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: tt2
-                  filterExpr: (timestamp_col_18 is not null and decimal1911_col_16 is not null and (timestamp_col_18 BETWEEN DynamicValue(RS_23_t1_timestamp_col_18_min) AND DynamicValue(RS_23_t1_timestamp_col_18_max) and in_bloom_filter(timestamp_col_18, DynamicValue(RS_23_t1_timestamp_col_18_bloom_filter)))) (type: boolean)
+                  filterExpr: (timestamp_col_18 is not null and decimal1911_col_16 is not null and (timestamp_col_18 BETWEEN DynamicValue(RS_23_t1__col3_min) AND DynamicValue(RS_23_t1__col3_max) and in_bloom_filter(timestamp_col_18, DynamicValue(RS_23_t1__col3_bloom_filter)))) (type: boolean)
                   Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
                   Filter Operator
-                    predicate: (timestamp_col_18 is not null and decimal1911_col_16 is not null and (timestamp_col_18 BETWEEN DynamicValue(RS_23_t1_timestamp_col_18_min) AND DynamicValue(RS_23_t1_timestamp_col_18_max) and in_bloom_filter(timestamp_col_18, DynamicValue(RS_23_t1_timestamp_col_18_bloom_filter)))) (type: boolean)
+                    predicate: (timestamp_col_18 is not null and decimal1911_col_16 is not null and (timestamp_col_18 BETWEEN DynamicValue(RS_23_t1__col3_min) AND DynamicValue(RS_23_t1__col3_max) and in_bloom_filter(timestamp_col_18, DynamicValue(RS_23_t1__col3_bloom_filter)))) (type: boolean)
                     Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
                     Select Operator
                       expressions: decimal1911_col_16 (type: decimal(19,11)), timestamp_col_18 (type: timestamp)

http://git-wip-us.apache.org/repos/asf/hive/blob/699d6ce3/ql/src/test/results/clientpositive/llap/dynamic_semijoin_reduction_3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/dynamic_semijoin_reduction_3.q.out b/ql/src/test/results/clientpositive/llap/dynamic_semijoin_reduction_3.q.out
index 8950b70..2c1f6e5 100644
--- a/ql/src/test/results/clientpositive/llap/dynamic_semijoin_reduction_3.q.out
+++ b/ql/src/test/results/clientpositive/llap/dynamic_semijoin_reduction_3.q.out
@@ -517,7 +517,7 @@ STAGE PLANS:
                   alias: t
                   Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
                   Filter Operator
-                    predicate: (a BETWEEN DynamicValue(RS_10_nonacidorctbl_a_min) AND DynamicValue(RS_10_nonacidorctbl_a_max) and in_bloom_filter(a, DynamicValue(RS_10_nonacidorctbl_a_bloom_filter))) (type: boolean)
+                    predicate: (a BETWEEN DynamicValue(RS_10_nonacidorctbl__col0_min) AND DynamicValue(RS_10_nonacidorctbl__col0_max) and in_bloom_filter(a, DynamicValue(RS_10_nonacidorctbl__col0_bloom_filter))) (type: boolean)
                     Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
                     Reduce Output Operator
                       key expressions: a (type: int)

http://git-wip-us.apache.org/repos/asf/hive/blob/699d6ce3/ql/src/test/results/clientpositive/llap/dynamic_semijoin_user_level.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/dynamic_semijoin_user_level.q.out b/ql/src/test/results/clientpositive/llap/dynamic_semijoin_user_level.q.out
index b910df4..0098b89 100644
--- a/ql/src/test/results/clientpositive/llap/dynamic_semijoin_user_level.q.out
+++ b/ql/src/test/results/clientpositive/llap/dynamic_semijoin_user_level.q.out
@@ -189,7 +189,7 @@ Stage-0
                   Select Operator [SEL_2] (rows=2000 width=87)
                     Output:["_col0"]
                     Filter Operator [FIL_17] (rows=2000 width=87)
-                      predicate:(key is not null and (key BETWEEN DynamicValue(RS_7_srcpart_small_key_min) AND DynamicValue(RS_7_srcpart_small_key_max) and in_bloom_filter(key, DynamicValue(RS_7_srcpart_small_key_bloom_filter))))
+                      predicate:(key is not null and (key BETWEEN DynamicValue(RS_7_srcpart_small_key1_min) AND DynamicValue(RS_7_srcpart_small_key1_max) and in_bloom_filter(key, DynamicValue(RS_7_srcpart_small_key1_bloom_filter))))
                       TableScan [TS_0] (rows=2000 width=87)
                         default@srcpart_date,srcpart_date,Tbl:COMPLETE,Col:COMPLETE,Output:["key"]
                       <-Reducer 5 [BROADCAST_EDGE] llap
@@ -405,7 +405,7 @@ Stage-0
                   Select Operator [SEL_8] (rows=2000 width=87)
                     Output:["_col0"]
                     Filter Operator [FIL_27] (rows=2000 width=87)
-                      predicate:(key is not null and (key BETWEEN DynamicValue(RS_10_srcpart_small_key_min) AND DynamicValue(RS_10_srcpart_small_key_max) and in_bloom_filter(key, DynamicValue(RS_10_srcpart_small_key_bloom_filter))))
+                      predicate:(key is not null and (key BETWEEN DynamicValue(RS_10_srcpart_small_key1_min) AND DynamicValue(RS_10_srcpart_small_key1_max) and in_bloom_filter(key, DynamicValue(RS_10_srcpart_small_key1_bloom_filter))))
                       TableScan [TS_6] (rows=2000 width=87)
                         default@srcpart_date,srcpart_date,Tbl:COMPLETE,Col:COMPLETE,Output:["key"]
                       <-Reducer 5 [BROADCAST_EDGE] llap
@@ -425,7 +425,7 @@ Stage-0
                   Select Operator [SEL_2] (rows=9174 width=70)
                     Output:["_col0"]
                     Filter Operator [FIL_25] (rows=9174 width=70)
-                      predicate:(cstring is not null and (cstring BETWEEN DynamicValue(RS_10_srcpart_small_cstring_min) AND DynamicValue(RS_10_srcpart_small_cstring_max) and in_bloom_filter(cstring, DynamicValue(RS_10_srcpart_small_cstring_bloom_filter))) and (cstring BETWEEN DynamicValue(RS_11_srcpart_date_cstring_min) AND DynamicValue(RS_11_srcpart_date_cstring_max) and in_bloom_filter(cstring, DynamicValue(RS_11_srcpart_date_cstring_bloom_filter))))
+                      predicate:(cstring is not null and (cstring BETWEEN DynamicValue(RS_10_srcpart_small_key1_min) AND DynamicValue(RS_10_srcpart_small_key1_max) and in_bloom_filter(cstring, DynamicValue(RS_10_srcpart_small_key1_bloom_filter))) and (cstring BETWEEN DynamicValue(RS_11_srcpart_date_key_min) AND DynamicValue(RS_11_srcpart_date_key_max) and in_bloom_filter(cstring, DynamicValue(RS_11_srcpart_date_key_bloom_filter))))
                       TableScan [TS_0] (rows=12288 width=70)
                         default@alltypesorc_int,alltypesorc_int,Tbl:COMPLETE,Col:COMPLETE,Output:["cstring"]
                       <-Reducer 5 [BROADCAST_EDGE] llap
@@ -568,7 +568,7 @@ Stage-0
                   Select Operator [SEL_2] (rows=2000 width=178)
                     Output:["_col0","_col1"]
                     Filter Operator [FIL_17] (rows=2000 width=178)
-                      predicate:(key is not null and value is not null and (key BETWEEN DynamicValue(RS_7_srcpart_small_key_min) AND DynamicValue(RS_7_srcpart_small_key_max) and in_bloom_filter(key, DynamicValue(RS_7_srcpart_small_key_bloom_filter))) and (value BETWEEN DynamicValue(RS_7_srcpart_small_value_min) AND DynamicValue(RS_7_srcpart_small_value_max) and in_bloom_filter(value, DynamicValue(RS_7_srcpart_small_value_bloom_filter))))
+                      predicate:(key is not null and value is not null and (key BETWEEN DynamicValue(RS_7_srcpart_small_key1_min) AND DynamicValue(RS_7_srcpart_small_key1_max) and in_bloom_filter(key, DynamicValue(RS_7_srcpart_small_key1_bloom_filter))) and (value BETWEEN DynamicValue(RS_7_srcpart_small_value1_min) AND DynamicValue(RS_7_srcpart_small_value1_max) and in_bloom_filter(value, DynamicValue(RS_7_srcpart_small_value1_bloom_filter))))
                       TableScan [TS_0] (rows=2000 width=178)
                         default@srcpart_date,srcpart_date,Tbl:COMPLETE,Col:COMPLETE,Output:["key","value"]
                       <-Reducer 5 [BROADCAST_EDGE] llap
@@ -741,7 +741,7 @@ Stage-0
                       Select Operator [SEL_2] (rows=2000 width=178)
                         Output:["_col0","_col1"]
                         Filter Operator [FIL_26] (rows=2000 width=178)
-                          predicate:(key is not null and value is not null and (key BETWEEN DynamicValue(RS_10_srcpart_small_key_min) AND DynamicValue(RS_10_srcpart_small_key_max) and in_bloom_filter(key, DynamicValue(RS_10_srcpart_small_key_bloom_filter))))
+                          predicate:(key is not null and value is not null and (key BETWEEN DynamicValue(RS_10_srcpart_small_key1_min) AND DynamicValue(RS_10_srcpart_small_key1_max) and in_bloom_filter(key, DynamicValue(RS_10_srcpart_small_key1_bloom_filter))))
                           TableScan [TS_0] (rows=2000 width=178)
                             default@srcpart_date,srcpart_date,Tbl:COMPLETE,Col:COMPLETE,Output:["key","value"]
                           <-Reducer 7 [BROADCAST_EDGE] llap
@@ -761,7 +761,7 @@ Stage-0
                   Select Operator [SEL_8] (rows=9174 width=70)
                     Output:["_col0"]
                     Filter Operator [FIL_28] (rows=9174 width=70)
-                      predicate:(cstring is not null and (cstring BETWEEN DynamicValue(RS_12_srcpart_date_cstring_min) AND DynamicValue(RS_12_srcpart_date_cstring_max) and in_bloom_filter(cstring, DynamicValue(RS_12_srcpart_date_cstring_bloom_filter))))
+                      predicate:(cstring is not null and (cstring BETWEEN DynamicValue(RS_12_srcpart_date__col1_min) AND DynamicValue(RS_12_srcpart_date__col1_max) and in_bloom_filter(cstring, DynamicValue(RS_12_srcpart_date__col1_bloom_filter))))
                       TableScan [TS_6] (rows=12288 width=70)
                         default@alltypesorc_int,alltypesorc_int,Tbl:COMPLETE,Col:COMPLETE,Output:["cstring"]
                       <-Reducer 5 [BROADCAST_EDGE] llap
@@ -820,12 +820,12 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: srcpart_date
-                  filterExpr: (key is not null and (key BETWEEN DynamicValue(RS_7_srcpart_small_key_min) AND DynamicValue(RS_7_srcpart_small_key_max) and in_bloom_filter(key, DynamicValue(RS_7_srcpart_small_key_bloom_filter)))) (type: boolean)
+                  filterExpr: (key is not null and (key BETWEEN DynamicValue(RS_7_srcpart_small_key1_min) AND DynamicValue(RS_7_srcpart_small_key1_max) and in_bloom_filter(key, DynamicValue(RS_7_srcpart_small_key1_bloom_filter)))) (type: boolean)
                   Statistics: Num rows: 2000 Data size: 174000 Basic stats: COMPLETE Column stats: COMPLETE
                   GatherStats: false
                   Filter Operator
                     isSamplingPred: false
-                    predicate: (key is not null and (key BETWEEN DynamicValue(RS_7_srcpart_small_key_min) AND DynamicValue(RS_7_srcpart_small_key_max) and in_bloom_filter(key, DynamicValue(RS_7_srcpart_small_key_bloom_filter)))) (type: boolean)
+                    predicate: (key is not null and (key BETWEEN DynamicValue(RS_7_srcpart_small_key1_min) AND DynamicValue(RS_7_srcpart_small_key1_max) and in_bloom_filter(key, DynamicValue(RS_7_srcpart_small_key1_bloom_filter)))) (type: boolean)
                     Statistics: Num rows: 2000 Data size: 174000 Basic stats: COMPLETE Column stats: COMPLETE
                     Select Operator
                       expressions: key (type: string)

http://git-wip-us.apache.org/repos/asf/hive/blob/699d6ce3/ql/src/test/results/clientpositive/llap/semijoin_hint.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/semijoin_hint.q.out b/ql/src/test/results/clientpositive/llap/semijoin_hint.q.out
index 388888e..3ffc235 100644
--- a/ql/src/test/results/clientpositive/llap/semijoin_hint.q.out
+++ b/ql/src/test/results/clientpositive/llap/semijoin_hint.q.out
@@ -183,10 +183,10 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: k
-                  filterExpr: (str is not null and (str BETWEEN DynamicValue(RS_7_v_str_min) AND DynamicValue(RS_7_v_str_max) and in_bloom_filter(str, DynamicValue(RS_7_v_str_bloom_filter)))) (type: boolean)
+                  filterExpr: (str is not null and (str BETWEEN DynamicValue(RS_7_v_key1_min) AND DynamicValue(RS_7_v_key1_max) and in_bloom_filter(str, DynamicValue(RS_7_v_key1_bloom_filter)))) (type: boolean)
                   Statistics: Num rows: 2000 Data size: 174000 Basic stats: COMPLETE Column stats: COMPLETE
                   Filter Operator
-                    predicate: (str is not null and (str BETWEEN DynamicValue(RS_7_v_str_min) AND DynamicValue(RS_7_v_str_max) and in_bloom_filter(str, DynamicValue(RS_7_v_str_bloom_filter)))) (type: boolean)
+                    predicate: (str is not null and (str BETWEEN DynamicValue(RS_7_v_key1_min) AND DynamicValue(RS_7_v_key1_max) and in_bloom_filter(str, DynamicValue(RS_7_v_key1_bloom_filter)))) (type: boolean)
                     Statistics: Num rows: 2000 Data size: 174000 Basic stats: COMPLETE Column stats: COMPLETE
                     Select Operator
                       expressions: str (type: string)
@@ -413,10 +413,10 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: i
-                  filterExpr: (cstring is not null and (cstring BETWEEN DynamicValue(RS_7_srcpart_date_cstring_min) AND DynamicValue(RS_7_srcpart_date_cstring_max) and in_bloom_filter(cstring, DynamicValue(RS_7_srcpart_date_cstring_bloom_filter)))) (type: boolean)
+                  filterExpr: (cstring is not null and (cstring BETWEEN DynamicValue(RS_7_srcpart_date_value_min) AND DynamicValue(RS_7_srcpart_date_value_max) and in_bloom_filter(cstring, DynamicValue(RS_7_srcpart_date_value_bloom_filter)))) (type: boolean)
                   Statistics: Num rows: 12288 Data size: 862450 Basic stats: COMPLETE Column stats: COMPLETE
                   Filter Operator
-                    predicate: (cstring is not null and (cstring BETWEEN DynamicValue(RS_7_srcpart_date_cstring_min) AND DynamicValue(RS_7_srcpart_date_cstring_max) and in_bloom_filter(cstring, DynamicValue(RS_7_srcpart_date_cstring_bloom_filter)))) (type: boolean)
+                    predicate: (cstring is not null and (cstring BETWEEN DynamicValue(RS_7_srcpart_date_value_min) AND DynamicValue(RS_7_srcpart_date_value_max) and in_bloom_filter(cstring, DynamicValue(RS_7_srcpart_date_value_bloom_filter)))) (type: boolean)
                     Statistics: Num rows: 9174 Data size: 643900 Basic stats: COMPLETE Column stats: COMPLETE
                     Select Operator
                       expressions: cstring (type: string)
@@ -453,7 +453,7 @@ STAGE PLANS:
                         outputColumnNames: _col0
                         Statistics: Num rows: 2000 Data size: 182000 Basic stats: COMPLETE Column stats: COMPLETE
                         Group By Operator
-                          aggregations: min(_col0), max(_col0), bloom_filter(_col0, expectedEntries=5000)
+                          aggregations: min(_col0), max(_col0), bloom_filter(_col0, expectedEntries=428)
                           mode: hash
                           outputColumnNames: _col0, _col1, _col2
                           Statistics: Num rows: 1 Data size: 552 Basic stats: COMPLETE Column stats: COMPLETE
@@ -467,10 +467,10 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: v
-                  filterExpr: (key1 is not null and (key1 BETWEEN DynamicValue(RS_9_i_key1_min) AND DynamicValue(RS_9_i_key1_max) and in_bloom_filter(key1, DynamicValue(RS_9_i_key1_bloom_filter)))) (type: boolean)
+                  filterExpr: (key1 is not null and (key1 BETWEEN DynamicValue(RS_9_i_str_min) AND DynamicValue(RS_9_i_str_max) and in_bloom_filter(key1, DynamicValue(RS_9_i_str_bloom_filter)))) (type: boolean)
                   Statistics: Num rows: 1000 Data size: 87000 Basic stats: COMPLETE Column stats: PARTIAL
                   Filter Operator
-                    predicate: (key1 is not null and (key1 BETWEEN DynamicValue(RS_9_i_key1_min) AND DynamicValue(RS_9_i_key1_max) and in_bloom_filter(key1, DynamicValue(RS_9_i_key1_bloom_filter)))) (type: boolean)
+                    predicate: (key1 is not null and (key1 BETWEEN DynamicValue(RS_9_i_str_min) AND DynamicValue(RS_9_i_str_max) and in_bloom_filter(key1, DynamicValue(RS_9_i_str_bloom_filter)))) (type: boolean)
                     Statistics: Num rows: 1000 Data size: 87000 Basic stats: COMPLETE Column stats: PARTIAL
                     Select Operator
                       expressions: key1 (type: string)
@@ -564,7 +564,7 @@ STAGE PLANS:
             Execution mode: llap
             Reduce Operator Tree:
               Group By Operator
-                aggregations: min(VALUE._col0), max(VALUE._col1), bloom_filter(VALUE._col2, expectedEntries=5000)
+                aggregations: min(VALUE._col0), max(VALUE._col1), bloom_filter(VALUE._col2, expectedEntries=428)
                 mode: final
                 outputColumnNames: _col0, _col1, _col2
                 Statistics: Num rows: 1 Data size: 552 Basic stats: COMPLETE Column stats: COMPLETE
@@ -636,10 +636,10 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: v
-                  filterExpr: (key1 is not null and (key1 BETWEEN DynamicValue(RS_3_i_key1_min) AND DynamicValue(RS_3_i_key1_max) and in_bloom_filter(key1, DynamicValue(RS_3_i_key1_bloom_filter)))) (type: boolean)
+                  filterExpr: (key1 is not null and (key1 BETWEEN DynamicValue(RS_3_i_cstring_min) AND DynamicValue(RS_3_i_cstring_max) and in_bloom_filter(key1, DynamicValue(RS_3_i_cstring_bloom_filter)))) (type: boolean)
                   Statistics: Num rows: 1000 Data size: 87000 Basic stats: COMPLETE Column stats: PARTIAL
                   Filter Operator
-                    predicate: (key1 is not null and (key1 BETWEEN DynamicValue(RS_3_i_key1_min) AND DynamicValue(RS_3_i_key1_max) and in_bloom_filter(key1, DynamicValue(RS_3_i_key1_bloom_filter)))) (type: boolean)
+                    predicate: (key1 is not null and (key1 BETWEEN DynamicValue(RS_3_i_cstring_min) AND DynamicValue(RS_3_i_cstring_max) and in_bloom_filter(key1, DynamicValue(RS_3_i_cstring_bloom_filter)))) (type: boolean)
                     Statistics: Num rows: 1000 Data size: 87000 Basic stats: COMPLETE Column stats: PARTIAL
                     Select Operator
                       expressions: key1 (type: string)
@@ -656,10 +656,10 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: k
-                  filterExpr: (str is not null and (str BETWEEN DynamicValue(RS_3_i_str_min) AND DynamicValue(RS_3_i_str_max) and in_bloom_filter(str, DynamicValue(RS_3_i_str_bloom_filter)))) (type: boolean)
+                  filterExpr: (str is not null and (str BETWEEN DynamicValue(RS_3_i_cstring_min) AND DynamicValue(RS_3_i_cstring_max) and in_bloom_filter(str, DynamicValue(RS_3_i_cstring_bloom_filter)))) (type: boolean)
                   Statistics: Num rows: 2000 Data size: 174000 Basic stats: COMPLETE Column stats: COMPLETE
                   Filter Operator
-                    predicate: (str is not null and (str BETWEEN DynamicValue(RS_3_i_str_min) AND DynamicValue(RS_3_i_str_max) and in_bloom_filter(str, DynamicValue(RS_3_i_str_bloom_filter)))) (type: boolean)
+                    predicate: (str is not null and (str BETWEEN DynamicValue(RS_3_i_cstring_min) AND DynamicValue(RS_3_i_cstring_max) and in_bloom_filter(str, DynamicValue(RS_3_i_cstring_bloom_filter)))) (type: boolean)
                     Statistics: Num rows: 2000 Data size: 174000 Basic stats: COMPLETE Column stats: COMPLETE
                     Select Operator
                       expressions: str (type: string)
@@ -789,10 +789,10 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: v
-                  filterExpr: (key1 is not null and (key1 BETWEEN DynamicValue(RS_3_k_key1_min) AND DynamicValue(RS_3_k_key1_max) and in_bloom_filter(key1, DynamicValue(RS_3_k_key1_bloom_filter)))) (type: boolean)
+                  filterExpr: (key1 is not null and (key1 BETWEEN DynamicValue(RS_3_k_str_min) AND DynamicValue(RS_3_k_str_max) and in_bloom_filter(key1, DynamicValue(RS_3_k_str_bloom_filter)))) (type: boolean)
                   Statistics: Num rows: 1000 Data size: 87000 Basic stats: COMPLETE Column stats: PARTIAL
                   Filter Operator
-                    predicate: (key1 is not null and (key1 BETWEEN DynamicValue(RS_3_k_key1_min) AND DynamicValue(RS_3_k_key1_max) and in_bloom_filter(key1, DynamicValue(RS_3_k_key1_bloom_filter)))) (type: boolean)
+                    predicate: (key1 is not null and (key1 BETWEEN DynamicValue(RS_3_k_str_min) AND DynamicValue(RS_3_k_str_max) and in_bloom_filter(key1, DynamicValue(RS_3_k_str_bloom_filter)))) (type: boolean)
                     Statistics: Num rows: 1000 Data size: 87000 Basic stats: COMPLETE Column stats: PARTIAL
                     Select Operator
                       expressions: key1 (type: string)
@@ -860,6 +860,106 @@ STAGE PLANS:
       Processor Tree:
         ListSink
 
+PREHOOK: query: explain select /*+ semi(k, str, 5000)*/ count(*) from srcpart_date k join srcpart_small v on (k.value = v.key1)
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select /*+ semi(k, str, 5000)*/ count(*) from srcpart_date k join srcpart_small v on (k.value = v.key1)
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 4 (SIMPLE_EDGE)
+        Reducer 3 <- Reducer 2 (CUSTOM_SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: k
+                  filterExpr: value is not null (type: boolean)
+                  Statistics: Num rows: 2000 Data size: 182000 Basic stats: COMPLETE Column stats: COMPLETE
+                  Filter Operator
+                    predicate: value is not null (type: boolean)
+                    Statistics: Num rows: 2000 Data size: 182000 Basic stats: COMPLETE Column stats: COMPLETE
+                    Select Operator
+                      expressions: value (type: string)
+                      outputColumnNames: value
+                      Statistics: Num rows: 2000 Data size: 182000 Basic stats: COMPLETE Column stats: COMPLETE
+                      Reduce Output Operator
+                        key expressions: value (type: string)
+                        sort order: +
+                        Map-reduce partition columns: value (type: string)
+                        Statistics: Num rows: 2000 Data size: 182000 Basic stats: COMPLETE Column stats: COMPLETE
+            Execution mode: llap
+            LLAP IO: all inputs
+        Map 4 
+            Map Operator Tree:
+                TableScan
+                  alias: v
+                  filterExpr: key1 is not null (type: boolean)
+                  Statistics: Num rows: 1000 Data size: 87000 Basic stats: COMPLETE Column stats: PARTIAL
+                  Filter Operator
+                    predicate: key1 is not null (type: boolean)
+                    Statistics: Num rows: 1000 Data size: 87000 Basic stats: COMPLETE Column stats: PARTIAL
+                    Select Operator
+                      expressions: key1 (type: string)
+                      outputColumnNames: key1
+                      Statistics: Num rows: 1000 Data size: 87000 Basic stats: COMPLETE Column stats: PARTIAL
+                      Reduce Output Operator
+                        key expressions: key1 (type: string)
+                        sort order: +
+                        Map-reduce partition columns: key1 (type: string)
+                        Statistics: Num rows: 1000 Data size: 87000 Basic stats: COMPLETE Column stats: PARTIAL
+            Execution mode: llap
+            LLAP IO: all inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Merge Join Operator
+                condition map:
+                     Inner Join 0 to 1
+                keys:
+                  0 value (type: string)
+                  1 key1 (type: string)
+                Statistics: Num rows: 9345 Data size: 74760 Basic stats: COMPLETE Column stats: PARTIAL
+                Select Operator
+                  Statistics: Num rows: 9345 Data size: 37380 Basic stats: COMPLETE Column stats: PARTIAL
+                  Group By Operator
+                    aggregations: count()
+                    mode: hash
+                    outputColumnNames: _col0
+                    Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: PARTIAL
+                    Reduce Output Operator
+                      sort order: 
+                      Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: PARTIAL
+                      value expressions: _col0 (type: bigint)
+        Reducer 3 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: count(VALUE._col0)
+                mode: mergepartial
+                outputColumnNames: $f0
+                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: PARTIAL
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: PARTIAL
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
 PREHOOK: query: explain select /*+ semi(k, str, 5000)*/ count(*) from srcpart_date k join srcpart_small s on (k.str = s.key1)
         union all
         select /*+ semi(v, 5000)*/ count(*) from srcpart_date d join srcpart_small v on (d.str = v.key1)
@@ -957,10 +1057,10 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: s
-                  filterExpr: (key1 is not null and (key1 BETWEEN DynamicValue(RS_6_k_key1_min) AND DynamicValue(RS_6_k_key1_max) and in_bloom_filter(key1, DynamicValue(RS_6_k_key1_bloom_filter)))) (type: boolean)
+                  filterExpr: (key1 is not null and (key1 BETWEEN DynamicValue(RS_6_k_str_min) AND DynamicValue(RS_6_k_str_max) and in_bloom_filter(key1, DynamicValue(RS_6_k_str_bloom_filter)))) (type: boolean)
                   Statistics: Num rows: 1000 Data size: 87000 Basic stats: COMPLETE Column stats: PARTIAL
                   Filter Operator
-                    predicate: (key1 is not null and (key1 BETWEEN DynamicValue(RS_6_k_key1_min) AND DynamicValue(RS_6_k_key1_max) and in_bloom_filter(key1, DynamicValue(RS_6_k_key1_bloom_filter)))) (type: boolean)
+                    predicate: (key1 is not null and (key1 BETWEEN DynamicValue(RS_6_k_str_min) AND DynamicValue(RS_6_k_str_max) and in_bloom_filter(key1, DynamicValue(RS_6_k_str_bloom_filter)))) (type: boolean)
                     Statistics: Num rows: 1000 Data size: 87000 Basic stats: COMPLETE Column stats: PARTIAL
                     Select Operator
                       expressions: key1 (type: string)
@@ -977,10 +1077,10 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: d
-                  filterExpr: (str is not null and (str BETWEEN DynamicValue(RS_21_v_str_min) AND DynamicValue(RS_21_v_str_max) and in_bloom_filter(str, DynamicValue(RS_21_v_str_bloom_filter)))) (type: boolean)
+                  filterExpr: (str is not null and (str BETWEEN DynamicValue(RS_21_v_key1_min) AND DynamicValue(RS_21_v_key1_max) and in_bloom_filter(str, DynamicValue(RS_21_v_key1_bloom_filter)))) (type: boolean)
                   Statistics: Num rows: 2000 Data size: 174000 Basic stats: COMPLETE Column stats: COMPLETE
                   Filter Operator
-                    predicate: (str is not null and (str BETWEEN DynamicValue(RS_21_v_str_min) AND DynamicValue(RS_21_v_str_max) and in_bloom_filter(str, DynamicValue(RS_21_v_str_bloom_filter)))) (type: boolean)
+                    predicate: (str is not null and (str BETWEEN DynamicValue(RS_21_v_key1_min) AND DynamicValue(RS_21_v_key1_max) and in_bloom_filter(str, DynamicValue(RS_21_v_key1_bloom_filter)))) (type: boolean)
                     Statistics: Num rows: 2000 Data size: 174000 Basic stats: COMPLETE Column stats: COMPLETE
                     Select Operator
                       expressions: str (type: string)
@@ -1117,10 +1217,10 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: k
-                  filterExpr: (str is not null and (str BETWEEN DynamicValue(RS_7_v_str_min) AND DynamicValue(RS_7_v_str_max) and in_bloom_filter(str, DynamicValue(RS_7_v_str_bloom_filter)))) (type: boolean)
+                  filterExpr: (str is not null and (str BETWEEN DynamicValue(RS_7_v_key1_min) AND DynamicValue(RS_7_v_key1_max) and in_bloom_filter(str, DynamicValue(RS_7_v_key1_bloom_filter)))) (type: boolean)
                   Statistics: Num rows: 2000 Data size: 174000 Basic stats: COMPLETE Column stats: COMPLETE
                   Filter Operator
-                    predicate: (str is not null and (str BETWEEN DynamicValue(RS_7_v_str_min) AND DynamicValue(RS_7_v_str_max) and in_bloom_filter(str, DynamicValue(RS_7_v_str_bloom_filter)))) (type: boolean)
+                    predicate: (str is not null and (str BETWEEN DynamicValue(RS_7_v_key1_min) AND DynamicValue(RS_7_v_key1_max) and in_bloom_filter(str, DynamicValue(RS_7_v_key1_bloom_filter)))) (type: boolean)
                     Statistics: Num rows: 2000 Data size: 174000 Basic stats: COMPLETE Column stats: COMPLETE
                     Select Operator
                       expressions: str (type: string)
@@ -1343,10 +1443,10 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: i
-                  filterExpr: (cstring is not null and (cstring BETWEEN DynamicValue(RS_10_srcpart_date_cstring_min) AND DynamicValue(RS_10_srcpart_date_cstring_max) and in_bloom_filter(cstring, DynamicValue(RS_10_srcpart_date_cstring_bloom_filter)))) (type: boolean)
+                  filterExpr: (cstring is not null and (cstring BETWEEN DynamicValue(RS_10_srcpart_date_value_min) AND DynamicValue(RS_10_srcpart_date_value_max) and in_bloom_filter(cstring, DynamicValue(RS_10_srcpart_date_value_bloom_filter)))) (type: boolean)
                   Statistics: Num rows: 12288 Data size: 862450 Basic stats: COMPLETE Column stats: COMPLETE
                   Filter Operator
-                    predicate: (cstring is not null and (cstring BETWEEN DynamicValue(RS_10_srcpart_date_cstring_min) AND DynamicValue(RS_10_srcpart_date_cstring_max) and in_bloom_filter(cstring, DynamicValue(RS_10_srcpart_date_cstring_bloom_filter)))) (type: boolean)
+                    predicate: (cstring is not null and (cstring BETWEEN DynamicValue(RS_10_srcpart_date_value_min) AND DynamicValue(RS_10_srcpart_date_value_max) and in_bloom_filter(cstring, DynamicValue(RS_10_srcpart_date_value_bloom_filter)))) (type: boolean)
                     Statistics: Num rows: 9174 Data size: 643900 Basic stats: COMPLETE Column stats: COMPLETE
                     Select Operator
                       expressions: cstring (type: string)
@@ -1363,10 +1463,10 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: srcpart_date
-                  filterExpr: (str is not null and value is not null and (str BETWEEN DynamicValue(RS_13_v_str_min) AND DynamicValue(RS_13_v_str_max) and in_bloom_filter(str, DynamicValue(RS_13_v_str_bloom_filter)))) (type: boolean)
+                  filterExpr: (str is not null and value is not null and (str BETWEEN DynamicValue(RS_13_v_key1_min) AND DynamicValue(RS_13_v_key1_max) and in_bloom_filter(str, DynamicValue(RS_13_v_key1_bloom_filter)))) (type: boolean)
                   Statistics: Num rows: 2000 Data size: 356000 Basic stats: COMPLETE Column stats: COMPLETE
                   Filter Operator
-                    predicate: (str is not null and value is not null and (str BETWEEN DynamicValue(RS_13_v_str_min) AND DynamicValue(RS_13_v_str_max) and in_bloom_filter(str, DynamicValue(RS_13_v_str_bloom_filter)))) (type: boolean)
+                    predicate: (str is not null and value is not null and (str BETWEEN DynamicValue(RS_13_v_key1_min) AND DynamicValue(RS_13_v_key1_max) and in_bloom_filter(str, DynamicValue(RS_13_v_key1_bloom_filter)))) (type: boolean)
                     Statistics: Num rows: 2000 Data size: 356000 Basic stats: COMPLETE Column stats: COMPLETE
                     Select Operator
                       expressions: str (type: string), value (type: string)
@@ -1383,7 +1483,7 @@ STAGE PLANS:
                         outputColumnNames: _col0
                         Statistics: Num rows: 2000 Data size: 182000 Basic stats: COMPLETE Column stats: COMPLETE
                         Group By Operator
-                          aggregations: min(_col0), max(_col0), bloom_filter(_col0, expectedEntries=5000)
+                          aggregations: min(_col0), max(_col0), bloom_filter(_col0, expectedEntries=428)
                           mode: hash
                           outputColumnNames: _col0, _col1, _col2
                           Statistics: Num rows: 1 Data size: 552 Basic stats: COMPLETE Column stats: COMPLETE
@@ -1480,7 +1580,7 @@ STAGE PLANS:
             Execution mode: llap
             Reduce Operator Tree:
               Group By Operator
-                aggregations: min(VALUE._col0), max(VALUE._col1), bloom_filter(VALUE._col2, expectedEntries=5000)
+                aggregations: min(VALUE._col0), max(VALUE._col1), bloom_filter(VALUE._col2, expectedEntries=428)
                 mode: final
                 outputColumnNames: _col0, _col1, _col2
                 Statistics: Num rows: 1 Data size: 552 Basic stats: COMPLETE Column stats: COMPLETE
@@ -1564,10 +1664,10 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: v
-                  filterExpr: (key1 is not null and (key1 BETWEEN DynamicValue(RS_9_i_key1_min) AND DynamicValue(RS_9_i_key1_max) and in_bloom_filter(key1, DynamicValue(RS_9_i_key1_bloom_filter)))) (type: boolean)
+                  filterExpr: (key1 is not null and (key1 BETWEEN DynamicValue(RS_9_i_cstring_min) AND DynamicValue(RS_9_i_cstring_max) and in_bloom_filter(key1, DynamicValue(RS_9_i_cstring_bloom_filter)))) (type: boolean)
                   Statistics: Num rows: 1000 Data size: 87000 Basic stats: COMPLETE Column stats: PARTIAL
                   Filter Operator
-                    predicate: (key1 is not null and (key1 BETWEEN DynamicValue(RS_9_i_key1_min) AND DynamicValue(RS_9_i_key1_max) and in_bloom_filter(key1, DynamicValue(RS_9_i_key1_bloom_filter)))) (type: boolean)
+                    predicate: (key1 is not null and (key1 BETWEEN DynamicValue(RS_9_i_cstring_min) AND DynamicValue(RS_9_i_cstring_max) and in_bloom_filter(key1, DynamicValue(RS_9_i_cstring_bloom_filter)))) (type: boolean)
                     Statistics: Num rows: 1000 Data size: 87000 Basic stats: COMPLETE Column stats: PARTIAL
                     Select Operator
                       expressions: key1 (type: string)
@@ -1584,10 +1684,10 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: k
-                  filterExpr: (str is not null and (str BETWEEN DynamicValue(RS_9_i_str_min) AND DynamicValue(RS_9_i_str_max) and in_bloom_filter(str, DynamicValue(RS_9_i_str_bloom_filter)))) (type: boolean)
+                  filterExpr: (str is not null and (str BETWEEN DynamicValue(RS_9_i_cstring_min) AND DynamicValue(RS_9_i_cstring_max) and in_bloom_filter(str, DynamicValue(RS_9_i_cstring_bloom_filter)))) (type: boolean)
                   Statistics: Num rows: 2000 Data size: 174000 Basic stats: COMPLETE Column stats: COMPLETE
                   Filter Operator
-                    predicate: (str is not null and (str BETWEEN DynamicValue(RS_9_i_str_min) AND DynamicValue(RS_9_i_str_max) and in_bloom_filter(str, DynamicValue(RS_9_i_str_bloom_filter)))) (type: boolean)
+                    predicate: (str is not null and (str BETWEEN DynamicValue(RS_9_i_cstring_min) AND DynamicValue(RS_9_i_cstring_max) and in_bloom_filter(str, DynamicValue(RS_9_i_cstring_bloom_filter)))) (type: boolean)
                     Statistics: Num rows: 2000 Data size: 174000 Basic stats: COMPLETE Column stats: COMPLETE
                     Select Operator
                       expressions: str (type: string)
@@ -1715,10 +1815,10 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: v
-                  filterExpr: (key1 is not null and (key1 BETWEEN DynamicValue(RS_6_k_key1_min) AND DynamicValue(RS_6_k_key1_max) and in_bloom_filter(key1, DynamicValue(RS_6_k_key1_bloom_filter)))) (type: boolean)
+                  filterExpr: (key1 is not null and (key1 BETWEEN DynamicValue(RS_6_k_str_min) AND DynamicValue(RS_6_k_str_max) and in_bloom_filter(key1, DynamicValue(RS_6_k_str_bloom_filter)))) (type: boolean)
                   Statistics: Num rows: 1000 Data size: 87000 Basic stats: COMPLETE Column stats: PARTIAL
                   Filter Operator
-                    predicate: (key1 is not null and (key1 BETWEEN DynamicValue(RS_6_k_key1_min) AND DynamicValue(RS_6_k_key1_max) and in_bloom_filter(key1, DynamicValue(RS_6_k_key1_bloom_filter)))) (type: boolean)
+                    predicate: (key1 is not null and (key1 BETWEEN DynamicValue(RS_6_k_str_min) AND DynamicValue(RS_6_k_str_max) and in_bloom_filter(key1, DynamicValue(RS_6_k_str_bloom_filter)))) (type: boolean)
                     Statistics: Num rows: 1000 Data size: 87000 Basic stats: COMPLETE Column stats: PARTIAL
                     Select Operator
                       expressions: key1 (type: string)
@@ -1784,6 +1884,104 @@ STAGE PLANS:
       Processor Tree:
         ListSink
 
+PREHOOK: query: explain select /*+ semi(k, str, 5000)*/ count(*) from srcpart_date k join srcpart_small v on (k.value = v.key1)
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select /*+ semi(k, str, 5000)*/ count(*) from srcpart_date k join srcpart_small v on (k.value = v.key1)
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 4 (SIMPLE_EDGE)
+        Reducer 3 <- Reducer 2 (CUSTOM_SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: k
+                  filterExpr: value is not null (type: boolean)
+                  Statistics: Num rows: 2000 Data size: 182000 Basic stats: COMPLETE Column stats: COMPLETE
+                  Filter Operator
+                    predicate: value is not null (type: boolean)
+                    Statistics: Num rows: 2000 Data size: 182000 Basic stats: COMPLETE Column stats: COMPLETE
+                    Select Operator
+                      expressions: value (type: string)
+                      outputColumnNames: _col0
+                      Statistics: Num rows: 2000 Data size: 182000 Basic stats: COMPLETE Column stats: COMPLETE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: string)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: string)
+                        Statistics: Num rows: 2000 Data size: 182000 Basic stats: COMPLETE Column stats: COMPLETE
+            Execution mode: llap
+            LLAP IO: all inputs
+        Map 4 
+            Map Operator Tree:
+                TableScan
+                  alias: v
+                  filterExpr: key1 is not null (type: boolean)
+                  Statistics: Num rows: 1000 Data size: 87000 Basic stats: COMPLETE Column stats: PARTIAL
+                  Filter Operator
+                    predicate: key1 is not null (type: boolean)
+                    Statistics: Num rows: 1000 Data size: 87000 Basic stats: COMPLETE Column stats: PARTIAL
+                    Select Operator
+                      expressions: key1 (type: string)
+                      outputColumnNames: _col0
+                      Statistics: Num rows: 1000 Data size: 87000 Basic stats: COMPLETE Column stats: PARTIAL
+                      Reduce Output Operator
+                        key expressions: _col0 (type: string)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: string)
+                        Statistics: Num rows: 1000 Data size: 87000 Basic stats: COMPLETE Column stats: PARTIAL
+            Execution mode: llap
+            LLAP IO: all inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Merge Join Operator
+                condition map:
+                     Inner Join 0 to 1
+                keys:
+                  0 _col0 (type: string)
+                  1 _col0 (type: string)
+                Statistics: Num rows: 9345 Data size: 74760 Basic stats: COMPLETE Column stats: PARTIAL
+                Group By Operator
+                  aggregations: count()
+                  mode: hash
+                  outputColumnNames: _col0
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: PARTIAL
+                  Reduce Output Operator
+                    sort order: 
+                    Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: PARTIAL
+                    value expressions: _col0 (type: bigint)
+        Reducer 3 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: count(VALUE._col0)
+                mode: mergepartial
+                outputColumnNames: _col0
+                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: PARTIAL
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: PARTIAL
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
 PREHOOK: query: explain select /*+ semi(k, str, 5000)*/ count(*) from srcpart_date k join srcpart_small s on (k.str = s.key1)
         union all
         select /*+ semi(v, 5000)*/ count(*) from srcpart_date d join srcpart_small v on (d.str = v.key1)
@@ -1873,10 +2071,10 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: s
-                  filterExpr: (key1 is not null and (key1 BETWEEN DynamicValue(RS_3_k_key1_min) AND DynamicValue(RS_3_k_key1_max) and in_bloom_filter(key1, DynamicValue(RS_3_k_key1_bloom_filter)))) (type: boolean)
+                  filterExpr: (key1 is not null and (key1 BETWEEN DynamicValue(RS_3_k_str_min) AND DynamicValue(RS_3_k_str_max) and in_bloom_filter(key1, DynamicValue(RS_3_k_str_bloom_filter)))) (type: boolean)
                   Statistics: Num rows: 1000 Data size: 87000 Basic stats: COMPLETE Column stats: PARTIAL
                   Filter Operator
-                    predicate: (key1 is not null and (key1 BETWEEN DynamicValue(RS_3_k_key1_min) AND DynamicValue(RS_3_k_key1_max) and in_bloom_filter(key1, DynamicValue(RS_3_k_key1_bloom_filter)))) (type: boolean)
+                    predicate: (key1 is not null and (key1 BETWEEN DynamicValue(RS_3_k_str_min) AND DynamicValue(RS_3_k_str_max) and in_bloom_filter(key1, DynamicValue(RS_3_k_str_bloom_filter)))) (type: boolean)
                     Statistics: Num rows: 1000 Data size: 87000 Basic stats: COMPLETE Column stats: PARTIAL
                     Reduce Output Operator
                       key expressions: key1 (type: string)
@@ -1889,10 +2087,10 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: d
-                  filterExpr: (str is not null and (str BETWEEN DynamicValue(RS_17_v_str_min) AND DynamicValue(RS_17_v_str_max) and in_bloom_filter(str, DynamicValue(RS_17_v_str_bloom_filter)))) (type: boolean)
+                  filterExpr: (str is not null and (str BETWEEN DynamicValue(RS_17_v_key1_min) AND DynamicValue(RS_17_v_key1_max) and in_bloom_filter(str, DynamicValue(RS_17_v_key1_bloom_filter)))) (type: boolean)
                   Statistics: Num rows: 2000 Data size: 174000 Basic stats: COMPLETE Column stats: COMPLETE
                   Filter Operator
-                    predicate: (str is not null and (str BETWEEN DynamicValue(RS_17_v_str_min) AND DynamicValue(RS_17_v_str_max) and in_bloom_filter(str, DynamicValue(RS_17_v_str_bloom_filter)))) (type: boolean)
+                    predicate: (str is not null and (str BETWEEN DynamicValue(RS_17_v_key1_min) AND DynamicValue(RS_17_v_key1_max) and in_bloom_filter(str, DynamicValue(RS_17_v_key1_bloom_filter)))) (type: boolean)
                     Statistics: Num rows: 2000 Data size: 174000 Basic stats: COMPLETE Column stats: COMPLETE
                     Reduce Output Operator
                       key expressions: str (type: string)
@@ -2025,10 +2223,10 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: k
-                  filterExpr: (str is not null and (str BETWEEN DynamicValue(RS_5_v_str_min) AND DynamicValue(RS_5_v_str_max) and in_bloom_filter(str, DynamicValue(RS_5_v_str_bloom_filter)))) (type: boolean)
+                  filterExpr: (str is not null and (str BETWEEN DynamicValue(RS_5_v_key1_min) AND DynamicValue(RS_5_v_key1_max) and in_bloom_filter(str, DynamicValue(RS_5_v_key1_bloom_filter)))) (type: boolean)
                   Statistics: Num rows: 2000 Data size: 174000 Basic stats: COMPLETE Column stats: COMPLETE
                   Filter Operator
-                    predicate: (str is not null and (str BETWEEN DynamicValue(RS_5_v_str_min) AND DynamicValue(RS_5_v_str_max) and in_bloom_filter(str, DynamicValue(RS_5_v_str_bloom_filter)))) (type: boolean)
+                    predicate: (str is not null and (str BETWEEN DynamicValue(RS_5_v_key1_min) AND DynamicValue(RS_5_v_key1_max) and in_bloom_filter(str, DynamicValue(RS_5_v_key1_bloom_filter)))) (type: boolean)
                     Statistics: Num rows: 2000 Data size: 174000 Basic stats: COMPLETE Column stats: COMPLETE
                     Reduce Output Operator
                       key expressions: str (type: string)
@@ -2265,10 +2463,10 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: v
-                  filterExpr: (key1 is not null and (key1 BETWEEN DynamicValue(RS_4_srcpart_date_key1_min) AND DynamicValue(RS_4_srcpart_date_key1_max) and in_bloom_filter(key1, DynamicValue(RS_4_srcpart_date_key1_bloom_filter)))) (type: boolean)
+                  filterExpr: (key1 is not null and (key1 BETWEEN DynamicValue(RS_4_srcpart_date_str_min) AND DynamicValue(RS_4_srcpart_date_str_max) and in_bloom_filter(key1, DynamicValue(RS_4_srcpart_date_str_bloom_filter)))) (type: boolean)
                   Statistics: Num rows: 1000 Data size: 87000 Basic stats: COMPLETE Column stats: PARTIAL
                   Filter Operator
-                    predicate: (key1 is not null and (key1 BETWEEN DynamicValue(RS_4_srcpart_date_key1_min) AND DynamicValue(RS_4_srcpart_date_key1_max) and in_bloom_filter(key1, DynamicValue(RS_4_srcpart_date_key1_bloom_filter)))) (type: boolean)
+                    predicate: (key1 is not null and (key1 BETWEEN DynamicValue(RS_4_srcpart_date_str_min) AND DynamicValue(RS_4_srcpart_date_str_max) and in_bloom_filter(key1, DynamicValue(RS_4_srcpart_date_str_bloom_filter)))) (type: boolean)
                     Statistics: Num rows: 1000 Data size: 87000 Basic stats: COMPLETE Column stats: PARTIAL
                     Reduce Output Operator
                       key expressions: key1 (type: string)
@@ -2281,10 +2479,10 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: i
-                  filterExpr: (cstring is not null and (cstring BETWEEN DynamicValue(RS_9_srcpart_date_cstring_min) AND DynamicValue(RS_9_srcpart_date_cstring_max) and in_bloom_filter(cstring, DynamicValue(RS_9_srcpart_date_cstring_bloom_filter)))) (type: boolean)
+                  filterExpr: (cstring is not null and (cstring BETWEEN DynamicValue(RS_9_srcpart_date__col1_min) AND DynamicValue(RS_9_srcpart_date__col1_max) and in_bloom_filter(cstring, DynamicValue(RS_9_srcpart_date__col1_bloom_filter)))) (type: boolean)
                   Statistics: Num rows: 12288 Data size: 862450 Basic stats: COMPLETE Column stats: COMPLETE
                   Filter Operator
-                    predicate: (cstring is not null and (cstring BETWEEN DynamicValue(RS_9_srcpart_date_cstring_min) AND DynamicValue(RS_9_srcpart_date_cstring_max) and in_bloom_filter(cstring, DynamicValue(RS_9_srcpart_date_cstring_bloom_filter)))) (type: boolean)
+                    predicate: (cstring is not null and (cstring BETWEEN DynamicValue(RS_9_srcpart_date__col1_min) AND DynamicValue(RS_9_srcpart_date__col1_max) and in_bloom_filter(cstring, DynamicValue(RS_9_srcpart_date__col1_bloom_filter)))) (type: boolean)
                     Statistics: Num rows: 9174 Data size: 643900 Basic stats: COMPLETE Column stats: COMPLETE
                     Reduce Output Operator
                       key expressions: cstring (type: string)
@@ -2314,7 +2512,7 @@ STAGE PLANS:
                   outputColumnNames: _col0
                   Statistics: Num rows: 9756 Data size: 887796 Basic stats: COMPLETE Column stats: PARTIAL
                   Group By Operator
-                    aggregations: min(_col0), max(_col0), bloom_filter(_col0, expectedEntries=5000)
+                    aggregations: min(_col0), max(_col0), bloom_filter(_col0, expectedEntries=428)
                     mode: hash
                     outputColumnNames: _col0, _col1, _col2
                     Statistics: Num rows: 1 Data size: 552 Basic stats: COMPLETE Column stats: PARTIAL
@@ -2360,7 +2558,7 @@ STAGE PLANS:
             Execution mode: llap
             Reduce Operator Tree:
               Group By Operator
-                aggregations: min(VALUE._col0), max(VALUE._col1), bloom_filter(VALUE._col2, expectedEntries=5000)
+                aggregations: min(VALUE._col0), max(VALUE._col1), bloom_filter(VALUE._col2, expectedEntries=428)
                 mode: final
                 outputColumnNames: _col0, _col1, _col2
                 Statistics: Num rows: 1 Data size: 552 Basic stats: COMPLETE Column stats: PARTIAL
@@ -2411,10 +2609,10 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: k
-                  filterExpr: (str is not null and (str BETWEEN DynamicValue(RS_8_i_str_min) AND DynamicValue(RS_8_i_str_max) and in_bloom_filter(str, DynamicValue(RS_8_i_str_bloom_filter)))) (type: boolean)
+                  filterExpr: (str is not null and (str BETWEEN DynamicValue(RS_8_i_cstring_min) AND DynamicValue(RS_8_i_cstring_max) and in_bloom_filter(str, DynamicValue(RS_8_i_cstring_bloom_filter)))) (type: boolean)
                   Statistics: Num rows: 2000 Data size: 174000 Basic stats: COMPLETE Column stats: COMPLETE
                   Filter Operator
-                    predicate: (str is not null and (str BETWEEN DynamicValue(RS_8_i_str_min) AND DynamicValue(RS_8_i_str_max) and in_bloom_filter(str, DynamicValue(RS_8_i_str_bloom_filter)))) (type: boolean)
+                    predicate: (str is not null and (str BETWEEN DynamicValue(RS_8_i_cstring_min) AND DynamicValue(RS_8_i_cstring_max) and in_bloom_filter(str, DynamicValue(RS_8_i_cstring_bloom_filter)))) (type: boolean)
                     Statistics: Num rows: 2000 Data size: 174000 Basic stats: COMPLETE Column stats: COMPLETE
                     Reduce Output Operator
                       key expressions: str (type: string)
@@ -2427,10 +2625,10 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: v
-                  filterExpr: (key1 is not null and (key1 BETWEEN DynamicValue(RS_8_i_key1_min) AND DynamicValue(RS_8_i_key1_max) and in_bloom_filter(key1, DynamicValue(RS_8_i_key1_bloom_filter)))) (type: boolean)
+                  filterExpr: (key1 is not null and (key1 BETWEEN DynamicValue(RS_8_i_cstring_min) AND DynamicValue(RS_8_i_cstring_max) and in_bloom_filter(key1, DynamicValue(RS_8_i_cstring_bloom_filter)))) (type: boolean)
                   Statistics: Num rows: 1000 Data size: 87000 Basic stats: COMPLETE Column stats: PARTIAL
                   Filter Operator
-                    predicate: (key1 is not null and (key1 BETWEEN DynamicValue(RS_8_i_key1_min) AND DynamicValue(RS_8_i_key1_max) and in_bloom_filter(key1, DynamicValue(RS_8_i_key1_bloom_filter)))) (type: boolean)
+                    predicate: (key1 is not null and (key1 BETWEEN DynamicValue(RS_8_i_cstring_min) AND DynamicValue(RS_8_i_cstring_max) and in_bloom_filter(key1, DynamicValue(RS_8_i_cstring_bloom_filter)))) (type: boolean)
                     Statistics: Num rows: 1000 Data size: 87000 Basic stats: COMPLETE Column stats: PARTIAL
                     Reduce Output Operator
                       key expressions: key1 (type: string)
@@ -2579,10 +2777,10 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: v
-                  filterExpr: (key1 is not null and (key1 BETWEEN DynamicValue(RS_3_k_key1_min) AND DynamicValue(RS_3_k_key1_max) and in_bloom_filter(key1, DynamicValue(RS_3_k_key1_bloom_filter)))) (type: boolean)
+                  filterExpr: (key1 is not null and (key1 BETWEEN DynamicValue(RS_3_k_str_min) AND DynamicValue(RS_3_k_str_max) and in_bloom_filter(key1, DynamicValue(RS_3_k_str_bloom_filter)))) (type: boolean)
                   Statistics: Num rows: 1000 Data size: 87000 Basic stats: COMPLETE Column stats: PARTIAL
                   Filter Operator
-                    predicate: (key1 is not null and (key1 BETWEEN DynamicValue(RS_3_k_key1_min) AND DynamicValue(RS_3_k_key1_max) and in_bloom_filter(key1, DynamicValue(RS_3_k_key1_bloom_filter)))) (type: boolean)
+                    predicate: (key1 is not null and (key1 BETWEEN DynamicValue(RS_3_k_str_min) AND DynamicValue(RS_3_k_str_max) and in_bloom_filter(key1, DynamicValue(RS_3_k_str_bloom_filter)))) (type: boolean)
                     Statistics: Num rows: 1000 Data size: 87000 Basic stats: COMPLETE Column stats: PARTIAL
                     Reduce Output Operator
                       key expressions: key1 (type: string)
@@ -2644,3 +2842,93 @@ STAGE PLANS:
       Processor Tree:
         ListSink
 
+PREHOOK: query: explain select /*+ semi(k, str, 5000)*/ count(*) from srcpart_date k join srcpart_small v on (k.value = v.key1)
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select /*+ semi(k, str, 5000)*/ count(*) from srcpart_date k join srcpart_small v on (k.value = v.key1)
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 4 (SIMPLE_EDGE)
+        Reducer 3 <- Reducer 2 (CUSTOM_SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: k
+                  filterExpr: value is not null (type: boolean)
+                  Statistics: Num rows: 2000 Data size: 182000 Basic stats: COMPLETE Column stats: COMPLETE
+                  Filter Operator
+                    predicate: value is not null (type: boolean)
+                    Statistics: Num rows: 2000 Data size: 182000 Basic stats: COMPLETE Column stats: COMPLETE
+                    Reduce Output Operator
+                      key expressions: value (type: string)
+                      sort order: +
+                      Map-reduce partition columns: value (type: string)
+                      Statistics: Num rows: 2000 Data size: 182000 Basic stats: COMPLETE Column stats: COMPLETE
+            Execution mode: llap
+            LLAP IO: all inputs
+        Map 4 
+            Map Operator Tree:
+                TableScan
+                  alias: v
+                  filterExpr: key1 is not null (type: boolean)
+                  Statistics: Num rows: 1000 Data size: 87000 Basic stats: COMPLETE Column stats: PARTIAL
+                  Filter Operator
+                    predicate: key1 is not null (type: boolean)
+                    Statistics: Num rows: 1000 Data size: 87000 Basic stats: COMPLETE Column stats: PARTIAL
+                    Reduce Output Operator
+                      key expressions: key1 (type: string)
+                      sort order: +
+                      Map-reduce partition columns: key1 (type: string)
+                      Statistics: Num rows: 1000 Data size: 87000 Basic stats: COMPLETE Column stats: PARTIAL
+            Execution mode: llap
+            LLAP IO: all inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Merge Join Operator
+                condition map:
+                     Inner Join 0 to 1
+                keys:
+                  0 value (type: string)
+                  1 key1 (type: string)
+                Statistics: Num rows: 9345 Data size: 74760 Basic stats: COMPLETE Column stats: PARTIAL
+                Group By Operator
+                  aggregations: count()
+                  mode: hash
+                  outputColumnNames: _col0
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: PARTIAL
+                  Reduce Output Operator
+                    sort order: 
+                    Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: PARTIAL
+                    value expressions: _col0 (type: bigint)
+        Reducer 3 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: count(VALUE._col0)
+                mode: mergepartial
+                outputColumnNames: _col0
+                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: PARTIAL
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: PARTIAL
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
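
The filterExpr/predicate lines in the plans above show Hive's runtime semijoin reduction: the small side's join keys feed a min/max pair and a Bloom filter, and the large side's scan is pre-filtered with them before the join. Below is a minimal, hypothetical Java sketch of that idea only — it uses Guava's BloomFilter in place of Hive's internal runtime filter, the key values are made up, and only the column/table names and the expected-entries figure are taken from the plan; none of this is Hive code.

import com.google.common.hash.BloomFilter;
import com.google.common.hash.Funnels;

import java.nio.charset.StandardCharsets;
import java.util.Arrays;
import java.util.List;

// Sketch of semijoin reduction: scan the small side (srcpart_small.key1) first to
// build min/max bounds and a Bloom filter, then filter the big side (srcpart_date.str)
// with "BETWEEN min AND max" plus a bloom-filter probe before the join, as the
// filterExpr/predicate lines in the plan describe. Guava's BloomFilter stands in
// for Hive's runtime filter here.
public class SemiJoinReductionSketch {
  public static void main(String[] args) {
    List<String> smallSideKeys = Arrays.asList("103", "255", "409");   // hypothetical key1 values
    List<String> bigSideKeys   = Arrays.asList("0", "103", "409", "999");

    // Build side: min, max and bloom filter (expectedEntries as in the hint/plan).
    String min = smallSideKeys.stream().min(String::compareTo).get();
    String max = smallSideKeys.stream().max(String::compareTo).get();
    BloomFilter<String> bloom =
        BloomFilter.create(Funnels.stringFunnel(StandardCharsets.UTF_8), 5000);
    smallSideKeys.forEach(bloom::put);

    // Probe side: rows failing the cheap range/bloom checks never reach the join.
    for (String str : bigSideKeys) {
      boolean survives = str.compareTo(min) >= 0
          && str.compareTo(max) <= 0
          && bloom.mightContain(str);
      System.out.println(str + " -> " + (survives ? "joined" : "filtered out"));
    }
  }
}

In the actual plans the min/max/bloom_filter aggregations are computed by a Group By Operator on the build side and shipped to the probe-side TableScan as DynamicValues.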


[08/50] [abbrv] hive git commit: HIVE-16581 : bug in HIVE-16523 (Sergey Shelukhin, reviewed by Gopal Vijayaraghavan)

Posted by we...@apache.org.
HIVE-16581 : bug in HIVE-16523 (Sergey Shelukhin, reviewed by Gopal Vijayaraghavan)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/d769f35f
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/d769f35f
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/d769f35f

Branch: refs/heads/hive-14535
Commit: d769f35fbc50ee6f8ce7b6994444246ea9138767
Parents: 740779f
Author: sergey <se...@apache.org>
Authored: Wed May 3 19:13:26 2017 -0700
Committer: sergey <se...@apache.org>
Committed: Wed May 3 19:17:17 2017 -0700

----------------------------------------------------------------------
 .../org/apache/hive/common/util/Murmur3.java    |  8 +++----
 .../apache/hive/common/util/TestMurmur3.java    | 24 ++++++++++++++++++++
 2 files changed, 28 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/d769f35f/storage-api/src/java/org/apache/hive/common/util/Murmur3.java
----------------------------------------------------------------------
diff --git a/storage-api/src/java/org/apache/hive/common/util/Murmur3.java b/storage-api/src/java/org/apache/hive/common/util/Murmur3.java
index 1c56765..eb05082 100644
--- a/storage-api/src/java/org/apache/hive/common/util/Murmur3.java
+++ b/storage-api/src/java/org/apache/hive/common/util/Murmur3.java
@@ -52,7 +52,7 @@ public class Murmur3 {
   private static final int N1 = 0x52dce729;
   private static final int N2 = 0x38495ab5;
 
-  private static final int DEFAULT_SEED = 104729;
+  public static final int DEFAULT_SEED = 104729;
 
   /**
    * Murmur3 32-bit variant.
@@ -358,13 +358,13 @@ public class Murmur3 {
         int k = -1;
         switch (tailLen) {
         case 1:
-          k = orBytes(tail[0], data[0], data[1], data[2]);
+          k = orBytes(tail[0], data[offset], data[offset + 1], data[offset + 2]);
           break;
         case 2:
-          k = orBytes(tail[0], tail[1], data[0], data[1]);
+          k = orBytes(tail[0], tail[1], data[offset], data[offset + 1]);
           break;
         case 3:
-          k = orBytes(tail[0], tail[1], tail[2], data[0]);
+          k = orBytes(tail[0], tail[1], tail[2], data[offset]);
           break;
         default: throw new AssertionError(tailLen);
         }

http://git-wip-us.apache.org/repos/asf/hive/blob/d769f35f/storage-api/src/test/org/apache/hive/common/util/TestMurmur3.java
----------------------------------------------------------------------
diff --git a/storage-api/src/test/org/apache/hive/common/util/TestMurmur3.java b/storage-api/src/test/org/apache/hive/common/util/TestMurmur3.java
index 5facc7c..391ee42 100644
--- a/storage-api/src/test/org/apache/hive/common/util/TestMurmur3.java
+++ b/storage-api/src/test/org/apache/hive/common/util/TestMurmur3.java
@@ -19,6 +19,7 @@
 package org.apache.hive.common.util;
 
 import static org.junit.Assert.assertEquals;
+import org.apache.hive.common.util.Murmur3.IncrementalHash32;
 
 import com.google.common.hash.HashFunction;
 import com.google.common.hash.Hashing;
@@ -221,4 +222,27 @@ public class TestMurmur3 {
       assertEquals(gl2, m2);
     }
   }
+  
+
+  @Test
+  public void testIncremental() {
+    final int seed = 123, arraySize = 1023;
+    byte[] bytes = new byte[arraySize];
+    new Random(seed).nextBytes(bytes);
+    int expected = Murmur3.hash32(bytes);
+    Murmur3.IncrementalHash32 same = new IncrementalHash32(), diff = new IncrementalHash32();
+    for (int blockSize = 1; blockSize <= arraySize; ++blockSize) {
+      byte[] block = new byte[blockSize];
+      same.start(Murmur3.DEFAULT_SEED);
+      diff.start(Murmur3.DEFAULT_SEED);
+      for (int offset = 0; offset < arraySize; offset += blockSize) {
+        int length = Math.min(arraySize - offset, blockSize);
+        same.add(bytes, offset, length);
+        System.arraycopy(bytes, offset, block, 0, length);
+        diff.add(block, 0, length);
+      }
+      assertEquals("Block size " + blockSize, expected, same.end());
+      assertEquals("Block size " + blockSize, expected, diff.end());
+    }
+  }
 }
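
The test above spells out the incremental Murmur3 API whose offset handling HIVE-16581 fixes. A minimal usage sketch along the same lines, assuming only the storage-api classes shown in the diff (the chunk size and the byte values are arbitrary):

import java.util.Random;

import org.apache.hive.common.util.Murmur3;
import org.apache.hive.common.util.Murmur3.IncrementalHash32;

// Sketch of the incremental Murmur3 API exercised by the test above: feeding the
// same bytes through add() in arbitrary chunk sizes, at non-zero offsets into the
// source array, must reproduce the one-shot hash32() result.
public class Murmur3IncrementalSketch {
  public static void main(String[] args) {
    byte[] data = new byte[1023];
    new Random(123).nextBytes(data);

    int oneShot = Murmur3.hash32(data);

    IncrementalHash32 inc = new IncrementalHash32();
    inc.start(Murmur3.DEFAULT_SEED);           // DEFAULT_SEED is public as of this patch
    for (int offset = 0; offset < data.length; offset += 100) {
      int length = Math.min(100, data.length - offset);
      inc.add(data, offset, length);           // non-zero offsets hit the fixed tail path
    }
    int incremental = inc.end();

    System.out.println(oneShot == incremental);  // expected: true
  }
}

Before this patch, add() with a non-zero offset could pull bytes from the start of the array into the carried-over tail (data[0] instead of data[offset]), so the two results could disagree.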


[48/50] [abbrv] hive git commit: HIVE-14671 : merge master into hive-14535 (Wei Zheng)

Posted by we...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/1ceaf357/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp
----------------------------------------------------------------------
diff --cc metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp
index df49615,9042cdb..4e3b2af
--- a/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp
+++ b/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp
@@@ -1240,14 -1240,14 +1240,14 @@@ uint32_t ThriftHiveMetastore_get_databa
          if (ftype == ::apache::thrift::protocol::T_LIST) {
            {
              this->success.clear();
-             uint32_t _size840;
-             ::apache::thrift::protocol::TType _etype843;
-             xfer += iprot->readListBegin(_etype843, _size840);
-             this->success.resize(_size840);
-             uint32_t _i844;
-             for (_i844 = 0; _i844 < _size840; ++_i844)
 -            uint32_t _size817;
 -            ::apache::thrift::protocol::TType _etype820;
 -            xfer += iprot->readListBegin(_etype820, _size817);
 -            this->success.resize(_size817);
 -            uint32_t _i821;
 -            for (_i821 = 0; _i821 < _size817; ++_i821)
++            uint32_t _size839;
++            ::apache::thrift::protocol::TType _etype842;
++            xfer += iprot->readListBegin(_etype842, _size839);
++            this->success.resize(_size839);
++            uint32_t _i843;
++            for (_i843 = 0; _i843 < _size839; ++_i843)
              {
-               xfer += iprot->readString(this->success[_i844]);
 -              xfer += iprot->readString(this->success[_i821]);
++              xfer += iprot->readString(this->success[_i843]);
              }
              xfer += iprot->readListEnd();
            }
@@@ -1286,10 -1286,10 +1286,10 @@@ uint32_t ThriftHiveMetastore_get_databa
      xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
      {
        xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->success.size()));
-       std::vector<std::string> ::const_iterator _iter845;
-       for (_iter845 = this->success.begin(); _iter845 != this->success.end(); ++_iter845)
 -      std::vector<std::string> ::const_iterator _iter822;
 -      for (_iter822 = this->success.begin(); _iter822 != this->success.end(); ++_iter822)
++      std::vector<std::string> ::const_iterator _iter844;
++      for (_iter844 = this->success.begin(); _iter844 != this->success.end(); ++_iter844)
        {
-         xfer += oprot->writeString((*_iter845));
 -        xfer += oprot->writeString((*_iter822));
++        xfer += oprot->writeString((*_iter844));
        }
        xfer += oprot->writeListEnd();
      }
@@@ -1334,14 -1334,14 +1334,14 @@@ uint32_t ThriftHiveMetastore_get_databa
          if (ftype == ::apache::thrift::protocol::T_LIST) {
            {
              (*(this->success)).clear();
-             uint32_t _size846;
-             ::apache::thrift::protocol::TType _etype849;
-             xfer += iprot->readListBegin(_etype849, _size846);
-             (*(this->success)).resize(_size846);
-             uint32_t _i850;
-             for (_i850 = 0; _i850 < _size846; ++_i850)
 -            uint32_t _size823;
 -            ::apache::thrift::protocol::TType _etype826;
 -            xfer += iprot->readListBegin(_etype826, _size823);
 -            (*(this->success)).resize(_size823);
 -            uint32_t _i827;
 -            for (_i827 = 0; _i827 < _size823; ++_i827)
++            uint32_t _size845;
++            ::apache::thrift::protocol::TType _etype848;
++            xfer += iprot->readListBegin(_etype848, _size845);
++            (*(this->success)).resize(_size845);
++            uint32_t _i849;
++            for (_i849 = 0; _i849 < _size845; ++_i849)
              {
-               xfer += iprot->readString((*(this->success))[_i850]);
 -              xfer += iprot->readString((*(this->success))[_i827]);
++              xfer += iprot->readString((*(this->success))[_i849]);
              }
              xfer += iprot->readListEnd();
            }
@@@ -1458,14 -1458,14 +1458,14 @@@ uint32_t ThriftHiveMetastore_get_all_da
          if (ftype == ::apache::thrift::protocol::T_LIST) {
            {
              this->success.clear();
-             uint32_t _size851;
-             ::apache::thrift::protocol::TType _etype854;
-             xfer += iprot->readListBegin(_etype854, _size851);
-             this->success.resize(_size851);
-             uint32_t _i855;
-             for (_i855 = 0; _i855 < _size851; ++_i855)
 -            uint32_t _size828;
 -            ::apache::thrift::protocol::TType _etype831;
 -            xfer += iprot->readListBegin(_etype831, _size828);
 -            this->success.resize(_size828);
 -            uint32_t _i832;
 -            for (_i832 = 0; _i832 < _size828; ++_i832)
++            uint32_t _size850;
++            ::apache::thrift::protocol::TType _etype853;
++            xfer += iprot->readListBegin(_etype853, _size850);
++            this->success.resize(_size850);
++            uint32_t _i854;
++            for (_i854 = 0; _i854 < _size850; ++_i854)
              {
-               xfer += iprot->readString(this->success[_i855]);
 -              xfer += iprot->readString(this->success[_i832]);
++              xfer += iprot->readString(this->success[_i854]);
              }
              xfer += iprot->readListEnd();
            }
@@@ -1504,10 -1504,10 +1504,10 @@@ uint32_t ThriftHiveMetastore_get_all_da
      xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
      {
        xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->success.size()));
-       std::vector<std::string> ::const_iterator _iter856;
-       for (_iter856 = this->success.begin(); _iter856 != this->success.end(); ++_iter856)
 -      std::vector<std::string> ::const_iterator _iter833;
 -      for (_iter833 = this->success.begin(); _iter833 != this->success.end(); ++_iter833)
++      std::vector<std::string> ::const_iterator _iter855;
++      for (_iter855 = this->success.begin(); _iter855 != this->success.end(); ++_iter855)
        {
-         xfer += oprot->writeString((*_iter856));
 -        xfer += oprot->writeString((*_iter833));
++        xfer += oprot->writeString((*_iter855));
        }
        xfer += oprot->writeListEnd();
      }
@@@ -1552,14 -1552,14 +1552,14 @@@ uint32_t ThriftHiveMetastore_get_all_da
          if (ftype == ::apache::thrift::protocol::T_LIST) {
            {
              (*(this->success)).clear();
-             uint32_t _size857;
-             ::apache::thrift::protocol::TType _etype860;
-             xfer += iprot->readListBegin(_etype860, _size857);
-             (*(this->success)).resize(_size857);
-             uint32_t _i861;
-             for (_i861 = 0; _i861 < _size857; ++_i861)
 -            uint32_t _size834;
 -            ::apache::thrift::protocol::TType _etype837;
 -            xfer += iprot->readListBegin(_etype837, _size834);
 -            (*(this->success)).resize(_size834);
 -            uint32_t _i838;
 -            for (_i838 = 0; _i838 < _size834; ++_i838)
++            uint32_t _size856;
++            ::apache::thrift::protocol::TType _etype859;
++            xfer += iprot->readListBegin(_etype859, _size856);
++            (*(this->success)).resize(_size856);
++            uint32_t _i860;
++            for (_i860 = 0; _i860 < _size856; ++_i860)
              {
-               xfer += iprot->readString((*(this->success))[_i861]);
 -              xfer += iprot->readString((*(this->success))[_i838]);
++              xfer += iprot->readString((*(this->success))[_i860]);
              }
              xfer += iprot->readListEnd();
            }
@@@ -2621,17 -2621,17 +2621,17 @@@ uint32_t ThriftHiveMetastore_get_type_a
          if (ftype == ::apache::thrift::protocol::T_MAP) {
            {
              this->success.clear();
-             uint32_t _size862;
-             ::apache::thrift::protocol::TType _ktype863;
-             ::apache::thrift::protocol::TType _vtype864;
-             xfer += iprot->readMapBegin(_ktype863, _vtype864, _size862);
-             uint32_t _i866;
-             for (_i866 = 0; _i866 < _size862; ++_i866)
 -            uint32_t _size839;
 -            ::apache::thrift::protocol::TType _ktype840;
 -            ::apache::thrift::protocol::TType _vtype841;
 -            xfer += iprot->readMapBegin(_ktype840, _vtype841, _size839);
 -            uint32_t _i843;
 -            for (_i843 = 0; _i843 < _size839; ++_i843)
++            uint32_t _size861;
++            ::apache::thrift::protocol::TType _ktype862;
++            ::apache::thrift::protocol::TType _vtype863;
++            xfer += iprot->readMapBegin(_ktype862, _vtype863, _size861);
++            uint32_t _i865;
++            for (_i865 = 0; _i865 < _size861; ++_i865)
              {
-               std::string _key867;
-               xfer += iprot->readString(_key867);
-               Type& _val868 = this->success[_key867];
-               xfer += _val868.read(iprot);
 -              std::string _key844;
 -              xfer += iprot->readString(_key844);
 -              Type& _val845 = this->success[_key844];
 -              xfer += _val845.read(iprot);
++              std::string _key866;
++              xfer += iprot->readString(_key866);
++              Type& _val867 = this->success[_key866];
++              xfer += _val867.read(iprot);
              }
              xfer += iprot->readMapEnd();
            }
@@@ -2670,11 -2670,11 +2670,11 @@@ uint32_t ThriftHiveMetastore_get_type_a
      xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_MAP, 0);
      {
        xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->success.size()));
-       std::map<std::string, Type> ::const_iterator _iter869;
-       for (_iter869 = this->success.begin(); _iter869 != this->success.end(); ++_iter869)
 -      std::map<std::string, Type> ::const_iterator _iter846;
 -      for (_iter846 = this->success.begin(); _iter846 != this->success.end(); ++_iter846)
++      std::map<std::string, Type> ::const_iterator _iter868;
++      for (_iter868 = this->success.begin(); _iter868 != this->success.end(); ++_iter868)
        {
-         xfer += oprot->writeString(_iter869->first);
-         xfer += _iter869->second.write(oprot);
 -        xfer += oprot->writeString(_iter846->first);
 -        xfer += _iter846->second.write(oprot);
++        xfer += oprot->writeString(_iter868->first);
++        xfer += _iter868->second.write(oprot);
        }
        xfer += oprot->writeMapEnd();
      }
@@@ -2719,17 -2719,17 +2719,17 @@@ uint32_t ThriftHiveMetastore_get_type_a
          if (ftype == ::apache::thrift::protocol::T_MAP) {
            {
              (*(this->success)).clear();
-             uint32_t _size870;
-             ::apache::thrift::protocol::TType _ktype871;
-             ::apache::thrift::protocol::TType _vtype872;
-             xfer += iprot->readMapBegin(_ktype871, _vtype872, _size870);
-             uint32_t _i874;
-             for (_i874 = 0; _i874 < _size870; ++_i874)
 -            uint32_t _size847;
 -            ::apache::thrift::protocol::TType _ktype848;
 -            ::apache::thrift::protocol::TType _vtype849;
 -            xfer += iprot->readMapBegin(_ktype848, _vtype849, _size847);
 -            uint32_t _i851;
 -            for (_i851 = 0; _i851 < _size847; ++_i851)
++            uint32_t _size869;
++            ::apache::thrift::protocol::TType _ktype870;
++            ::apache::thrift::protocol::TType _vtype871;
++            xfer += iprot->readMapBegin(_ktype870, _vtype871, _size869);
++            uint32_t _i873;
++            for (_i873 = 0; _i873 < _size869; ++_i873)
              {
-               std::string _key875;
-               xfer += iprot->readString(_key875);
-               Type& _val876 = (*(this->success))[_key875];
-               xfer += _val876.read(iprot);
 -              std::string _key852;
 -              xfer += iprot->readString(_key852);
 -              Type& _val853 = (*(this->success))[_key852];
 -              xfer += _val853.read(iprot);
++              std::string _key874;
++              xfer += iprot->readString(_key874);
++              Type& _val875 = (*(this->success))[_key874];
++              xfer += _val875.read(iprot);
              }
              xfer += iprot->readMapEnd();
            }
@@@ -2883,14 -2883,14 +2883,14 @@@ uint32_t ThriftHiveMetastore_get_fields
          if (ftype == ::apache::thrift::protocol::T_LIST) {
            {
              this->success.clear();
-             uint32_t _size877;
-             ::apache::thrift::protocol::TType _etype880;
-             xfer += iprot->readListBegin(_etype880, _size877);
-             this->success.resize(_size877);
-             uint32_t _i881;
-             for (_i881 = 0; _i881 < _size877; ++_i881)
 -            uint32_t _size854;
 -            ::apache::thrift::protocol::TType _etype857;
 -            xfer += iprot->readListBegin(_etype857, _size854);
 -            this->success.resize(_size854);
 -            uint32_t _i858;
 -            for (_i858 = 0; _i858 < _size854; ++_i858)
++            uint32_t _size876;
++            ::apache::thrift::protocol::TType _etype879;
++            xfer += iprot->readListBegin(_etype879, _size876);
++            this->success.resize(_size876);
++            uint32_t _i880;
++            for (_i880 = 0; _i880 < _size876; ++_i880)
              {
-               xfer += this->success[_i881].read(iprot);
 -              xfer += this->success[_i858].read(iprot);
++              xfer += this->success[_i880].read(iprot);
              }
              xfer += iprot->readListEnd();
            }
@@@ -2945,10 -2945,10 +2945,10 @@@ uint32_t ThriftHiveMetastore_get_fields
      xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
      {
        xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->success.size()));
-       std::vector<FieldSchema> ::const_iterator _iter882;
-       for (_iter882 = this->success.begin(); _iter882 != this->success.end(); ++_iter882)
 -      std::vector<FieldSchema> ::const_iterator _iter859;
 -      for (_iter859 = this->success.begin(); _iter859 != this->success.end(); ++_iter859)
++      std::vector<FieldSchema> ::const_iterator _iter881;
++      for (_iter881 = this->success.begin(); _iter881 != this->success.end(); ++_iter881)
        {
-         xfer += (*_iter882).write(oprot);
 -        xfer += (*_iter859).write(oprot);
++        xfer += (*_iter881).write(oprot);
        }
        xfer += oprot->writeListEnd();
      }
@@@ -3001,14 -3001,14 +3001,14 @@@ uint32_t ThriftHiveMetastore_get_fields
          if (ftype == ::apache::thrift::protocol::T_LIST) {
            {
              (*(this->success)).clear();
-             uint32_t _size883;
-             ::apache::thrift::protocol::TType _etype886;
-             xfer += iprot->readListBegin(_etype886, _size883);
-             (*(this->success)).resize(_size883);
-             uint32_t _i887;
-             for (_i887 = 0; _i887 < _size883; ++_i887)
 -            uint32_t _size860;
 -            ::apache::thrift::protocol::TType _etype863;
 -            xfer += iprot->readListBegin(_etype863, _size860);
 -            (*(this->success)).resize(_size860);
 -            uint32_t _i864;
 -            for (_i864 = 0; _i864 < _size860; ++_i864)
++            uint32_t _size882;
++            ::apache::thrift::protocol::TType _etype885;
++            xfer += iprot->readListBegin(_etype885, _size882);
++            (*(this->success)).resize(_size882);
++            uint32_t _i886;
++            for (_i886 = 0; _i886 < _size882; ++_i886)
              {
-               xfer += (*(this->success))[_i887].read(iprot);
 -              xfer += (*(this->success))[_i864].read(iprot);
++              xfer += (*(this->success))[_i886].read(iprot);
              }
              xfer += iprot->readListEnd();
            }
@@@ -3194,14 -3194,14 +3194,14 @@@ uint32_t ThriftHiveMetastore_get_fields
          if (ftype == ::apache::thrift::protocol::T_LIST) {
            {
              this->success.clear();
-             uint32_t _size888;
-             ::apache::thrift::protocol::TType _etype891;
-             xfer += iprot->readListBegin(_etype891, _size888);
-             this->success.resize(_size888);
-             uint32_t _i892;
-             for (_i892 = 0; _i892 < _size888; ++_i892)
 -            uint32_t _size865;
 -            ::apache::thrift::protocol::TType _etype868;
 -            xfer += iprot->readListBegin(_etype868, _size865);
 -            this->success.resize(_size865);
 -            uint32_t _i869;
 -            for (_i869 = 0; _i869 < _size865; ++_i869)
++            uint32_t _size887;
++            ::apache::thrift::protocol::TType _etype890;
++            xfer += iprot->readListBegin(_etype890, _size887);
++            this->success.resize(_size887);
++            uint32_t _i891;
++            for (_i891 = 0; _i891 < _size887; ++_i891)
              {
-               xfer += this->success[_i892].read(iprot);
 -              xfer += this->success[_i869].read(iprot);
++              xfer += this->success[_i891].read(iprot);
              }
              xfer += iprot->readListEnd();
            }
@@@ -3256,10 -3256,10 +3256,10 @@@ uint32_t ThriftHiveMetastore_get_fields
      xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
      {
        xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->success.size()));
-       std::vector<FieldSchema> ::const_iterator _iter893;
-       for (_iter893 = this->success.begin(); _iter893 != this->success.end(); ++_iter893)
 -      std::vector<FieldSchema> ::const_iterator _iter870;
 -      for (_iter870 = this->success.begin(); _iter870 != this->success.end(); ++_iter870)
++      std::vector<FieldSchema> ::const_iterator _iter892;
++      for (_iter892 = this->success.begin(); _iter892 != this->success.end(); ++_iter892)
        {
-         xfer += (*_iter893).write(oprot);
 -        xfer += (*_iter870).write(oprot);
++        xfer += (*_iter892).write(oprot);
        }
        xfer += oprot->writeListEnd();
      }
@@@ -3312,14 -3312,14 +3312,14 @@@ uint32_t ThriftHiveMetastore_get_fields
          if (ftype == ::apache::thrift::protocol::T_LIST) {
            {
              (*(this->success)).clear();
-             uint32_t _size894;
-             ::apache::thrift::protocol::TType _etype897;
-             xfer += iprot->readListBegin(_etype897, _size894);
-             (*(this->success)).resize(_size894);
-             uint32_t _i898;
-             for (_i898 = 0; _i898 < _size894; ++_i898)
 -            uint32_t _size871;
 -            ::apache::thrift::protocol::TType _etype874;
 -            xfer += iprot->readListBegin(_etype874, _size871);
 -            (*(this->success)).resize(_size871);
 -            uint32_t _i875;
 -            for (_i875 = 0; _i875 < _size871; ++_i875)
++            uint32_t _size893;
++            ::apache::thrift::protocol::TType _etype896;
++            xfer += iprot->readListBegin(_etype896, _size893);
++            (*(this->success)).resize(_size893);
++            uint32_t _i897;
++            for (_i897 = 0; _i897 < _size893; ++_i897)
              {
-               xfer += (*(this->success))[_i898].read(iprot);
 -              xfer += (*(this->success))[_i875].read(iprot);
++              xfer += (*(this->success))[_i897].read(iprot);
              }
              xfer += iprot->readListEnd();
            }
@@@ -3489,14 -3489,14 +3489,14 @@@ uint32_t ThriftHiveMetastore_get_schema
          if (ftype == ::apache::thrift::protocol::T_LIST) {
            {
              this->success.clear();
-             uint32_t _size899;
-             ::apache::thrift::protocol::TType _etype902;
-             xfer += iprot->readListBegin(_etype902, _size899);
-             this->success.resize(_size899);
-             uint32_t _i903;
-             for (_i903 = 0; _i903 < _size899; ++_i903)
 -            uint32_t _size876;
 -            ::apache::thrift::protocol::TType _etype879;
 -            xfer += iprot->readListBegin(_etype879, _size876);
 -            this->success.resize(_size876);
 -            uint32_t _i880;
 -            for (_i880 = 0; _i880 < _size876; ++_i880)
++            uint32_t _size898;
++            ::apache::thrift::protocol::TType _etype901;
++            xfer += iprot->readListBegin(_etype901, _size898);
++            this->success.resize(_size898);
++            uint32_t _i902;
++            for (_i902 = 0; _i902 < _size898; ++_i902)
              {
-               xfer += this->success[_i903].read(iprot);
 -              xfer += this->success[_i880].read(iprot);
++              xfer += this->success[_i902].read(iprot);
              }
              xfer += iprot->readListEnd();
            }
@@@ -3551,10 -3551,10 +3551,10 @@@ uint32_t ThriftHiveMetastore_get_schema
      xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
      {
        xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->success.size()));
-       std::vector<FieldSchema> ::const_iterator _iter904;
-       for (_iter904 = this->success.begin(); _iter904 != this->success.end(); ++_iter904)
 -      std::vector<FieldSchema> ::const_iterator _iter881;
 -      for (_iter881 = this->success.begin(); _iter881 != this->success.end(); ++_iter881)
++      std::vector<FieldSchema> ::const_iterator _iter903;
++      for (_iter903 = this->success.begin(); _iter903 != this->success.end(); ++_iter903)
        {
-         xfer += (*_iter904).write(oprot);
 -        xfer += (*_iter881).write(oprot);
++        xfer += (*_iter903).write(oprot);
        }
        xfer += oprot->writeListEnd();
      }
@@@ -3607,14 -3607,14 +3607,14 @@@ uint32_t ThriftHiveMetastore_get_schema
          if (ftype == ::apache::thrift::protocol::T_LIST) {
            {
              (*(this->success)).clear();
-             uint32_t _size905;
-             ::apache::thrift::protocol::TType _etype908;
-             xfer += iprot->readListBegin(_etype908, _size905);
-             (*(this->success)).resize(_size905);
-             uint32_t _i909;
-             for (_i909 = 0; _i909 < _size905; ++_i909)
 -            uint32_t _size882;
 -            ::apache::thrift::protocol::TType _etype885;
 -            xfer += iprot->readListBegin(_etype885, _size882);
 -            (*(this->success)).resize(_size882);
 -            uint32_t _i886;
 -            for (_i886 = 0; _i886 < _size882; ++_i886)
++            uint32_t _size904;
++            ::apache::thrift::protocol::TType _etype907;
++            xfer += iprot->readListBegin(_etype907, _size904);
++            (*(this->success)).resize(_size904);
++            uint32_t _i908;
++            for (_i908 = 0; _i908 < _size904; ++_i908)
              {
-               xfer += (*(this->success))[_i909].read(iprot);
 -              xfer += (*(this->success))[_i886].read(iprot);
++              xfer += (*(this->success))[_i908].read(iprot);
              }
              xfer += iprot->readListEnd();
            }
@@@ -3800,14 -3800,14 +3800,14 @@@ uint32_t ThriftHiveMetastore_get_schema
          if (ftype == ::apache::thrift::protocol::T_LIST) {
            {
              this->success.clear();
-             uint32_t _size910;
-             ::apache::thrift::protocol::TType _etype913;
-             xfer += iprot->readListBegin(_etype913, _size910);
-             this->success.resize(_size910);
-             uint32_t _i914;
-             for (_i914 = 0; _i914 < _size910; ++_i914)
 -            uint32_t _size887;
 -            ::apache::thrift::protocol::TType _etype890;
 -            xfer += iprot->readListBegin(_etype890, _size887);
 -            this->success.resize(_size887);
 -            uint32_t _i891;
 -            for (_i891 = 0; _i891 < _size887; ++_i891)
++            uint32_t _size909;
++            ::apache::thrift::protocol::TType _etype912;
++            xfer += iprot->readListBegin(_etype912, _size909);
++            this->success.resize(_size909);
++            uint32_t _i913;
++            for (_i913 = 0; _i913 < _size909; ++_i913)
              {
-               xfer += this->success[_i914].read(iprot);
 -              xfer += this->success[_i891].read(iprot);
++              xfer += this->success[_i913].read(iprot);
              }
              xfer += iprot->readListEnd();
            }
@@@ -3862,10 -3862,10 +3862,10 @@@ uint32_t ThriftHiveMetastore_get_schema
      xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
      {
        xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->success.size()));
-       std::vector<FieldSchema> ::const_iterator _iter915;
-       for (_iter915 = this->success.begin(); _iter915 != this->success.end(); ++_iter915)
 -      std::vector<FieldSchema> ::const_iterator _iter892;
 -      for (_iter892 = this->success.begin(); _iter892 != this->success.end(); ++_iter892)
++      std::vector<FieldSchema> ::const_iterator _iter914;
++      for (_iter914 = this->success.begin(); _iter914 != this->success.end(); ++_iter914)
        {
-         xfer += (*_iter915).write(oprot);
 -        xfer += (*_iter892).write(oprot);
++        xfer += (*_iter914).write(oprot);
        }
        xfer += oprot->writeListEnd();
      }
@@@ -3918,14 -3918,14 +3918,14 @@@ uint32_t ThriftHiveMetastore_get_schema
          if (ftype == ::apache::thrift::protocol::T_LIST) {
            {
              (*(this->success)).clear();
-             uint32_t _size916;
-             ::apache::thrift::protocol::TType _etype919;
-             xfer += iprot->readListBegin(_etype919, _size916);
-             (*(this->success)).resize(_size916);
-             uint32_t _i920;
-             for (_i920 = 0; _i920 < _size916; ++_i920)
 -            uint32_t _size893;
 -            ::apache::thrift::protocol::TType _etype896;
 -            xfer += iprot->readListBegin(_etype896, _size893);
 -            (*(this->success)).resize(_size893);
 -            uint32_t _i897;
 -            for (_i897 = 0; _i897 < _size893; ++_i897)
++            uint32_t _size915;
++            ::apache::thrift::protocol::TType _etype918;
++            xfer += iprot->readListBegin(_etype918, _size915);
++            (*(this->success)).resize(_size915);
++            uint32_t _i919;
++            for (_i919 = 0; _i919 < _size915; ++_i919)
              {
-               xfer += (*(this->success))[_i920].read(iprot);
 -              xfer += (*(this->success))[_i897].read(iprot);
++              xfer += (*(this->success))[_i919].read(iprot);
              }
              xfer += iprot->readListEnd();
            }
@@@ -4518,14 -4518,14 +4518,14 @@@ uint32_t ThriftHiveMetastore_create_tab
          if (ftype == ::apache::thrift::protocol::T_LIST) {
            {
              this->primaryKeys.clear();
-             uint32_t _size921;
-             ::apache::thrift::protocol::TType _etype924;
-             xfer += iprot->readListBegin(_etype924, _size921);
-             this->primaryKeys.resize(_size921);
-             uint32_t _i925;
-             for (_i925 = 0; _i925 < _size921; ++_i925)
 -            uint32_t _size898;
 -            ::apache::thrift::protocol::TType _etype901;
 -            xfer += iprot->readListBegin(_etype901, _size898);
 -            this->primaryKeys.resize(_size898);
 -            uint32_t _i902;
 -            for (_i902 = 0; _i902 < _size898; ++_i902)
++            uint32_t _size920;
++            ::apache::thrift::protocol::TType _etype923;
++            xfer += iprot->readListBegin(_etype923, _size920);
++            this->primaryKeys.resize(_size920);
++            uint32_t _i924;
++            for (_i924 = 0; _i924 < _size920; ++_i924)
              {
-               xfer += this->primaryKeys[_i925].read(iprot);
 -              xfer += this->primaryKeys[_i902].read(iprot);
++              xfer += this->primaryKeys[_i924].read(iprot);
              }
              xfer += iprot->readListEnd();
            }
@@@ -4538,14 -4538,14 +4538,14 @@@
          if (ftype == ::apache::thrift::protocol::T_LIST) {
            {
              this->foreignKeys.clear();
-             uint32_t _size926;
-             ::apache::thrift::protocol::TType _etype929;
-             xfer += iprot->readListBegin(_etype929, _size926);
-             this->foreignKeys.resize(_size926);
-             uint32_t _i930;
-             for (_i930 = 0; _i930 < _size926; ++_i930)
 -            uint32_t _size903;
 -            ::apache::thrift::protocol::TType _etype906;
 -            xfer += iprot->readListBegin(_etype906, _size903);
 -            this->foreignKeys.resize(_size903);
 -            uint32_t _i907;
 -            for (_i907 = 0; _i907 < _size903; ++_i907)
++            uint32_t _size925;
++            ::apache::thrift::protocol::TType _etype928;
++            xfer += iprot->readListBegin(_etype928, _size925);
++            this->foreignKeys.resize(_size925);
++            uint32_t _i929;
++            for (_i929 = 0; _i929 < _size925; ++_i929)
              {
-               xfer += this->foreignKeys[_i930].read(iprot);
 -              xfer += this->foreignKeys[_i907].read(iprot);
++              xfer += this->foreignKeys[_i929].read(iprot);
              }
              xfer += iprot->readListEnd();
            }
@@@ -4578,10 -4578,10 +4578,10 @@@ uint32_t ThriftHiveMetastore_create_tab
    xfer += oprot->writeFieldBegin("primaryKeys", ::apache::thrift::protocol::T_LIST, 2);
    {
      xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->primaryKeys.size()));
-     std::vector<SQLPrimaryKey> ::const_iterator _iter931;
-     for (_iter931 = this->primaryKeys.begin(); _iter931 != this->primaryKeys.end(); ++_iter931)
 -    std::vector<SQLPrimaryKey> ::const_iterator _iter908;
 -    for (_iter908 = this->primaryKeys.begin(); _iter908 != this->primaryKeys.end(); ++_iter908)
++    std::vector<SQLPrimaryKey> ::const_iterator _iter930;
++    for (_iter930 = this->primaryKeys.begin(); _iter930 != this->primaryKeys.end(); ++_iter930)
      {
-       xfer += (*_iter931).write(oprot);
 -      xfer += (*_iter908).write(oprot);
++      xfer += (*_iter930).write(oprot);
      }
      xfer += oprot->writeListEnd();
    }
@@@ -4590,10 -4590,10 +4590,10 @@@
    xfer += oprot->writeFieldBegin("foreignKeys", ::apache::thrift::protocol::T_LIST, 3);
    {
      xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->foreignKeys.size()));
-     std::vector<SQLForeignKey> ::const_iterator _iter932;
-     for (_iter932 = this->foreignKeys.begin(); _iter932 != this->foreignKeys.end(); ++_iter932)
 -    std::vector<SQLForeignKey> ::const_iterator _iter909;
 -    for (_iter909 = this->foreignKeys.begin(); _iter909 != this->foreignKeys.end(); ++_iter909)
++    std::vector<SQLForeignKey> ::const_iterator _iter931;
++    for (_iter931 = this->foreignKeys.begin(); _iter931 != this->foreignKeys.end(); ++_iter931)
      {
-       xfer += (*_iter932).write(oprot);
 -      xfer += (*_iter909).write(oprot);
++      xfer += (*_iter931).write(oprot);
      }
      xfer += oprot->writeListEnd();
    }
@@@ -4621,10 -4621,10 +4621,10 @@@ uint32_t ThriftHiveMetastore_create_tab
    xfer += oprot->writeFieldBegin("primaryKeys", ::apache::thrift::protocol::T_LIST, 2);
    {
      xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>((*(this->primaryKeys)).size()));
-     std::vector<SQLPrimaryKey> ::const_iterator _iter933;
-     for (_iter933 = (*(this->primaryKeys)).begin(); _iter933 != (*(this->primaryKeys)).end(); ++_iter933)
 -    std::vector<SQLPrimaryKey> ::const_iterator _iter910;
 -    for (_iter910 = (*(this->primaryKeys)).begin(); _iter910 != (*(this->primaryKeys)).end(); ++_iter910)
++    std::vector<SQLPrimaryKey> ::const_iterator _iter932;
++    for (_iter932 = (*(this->primaryKeys)).begin(); _iter932 != (*(this->primaryKeys)).end(); ++_iter932)
      {
-       xfer += (*_iter933).write(oprot);
 -      xfer += (*_iter910).write(oprot);
++      xfer += (*_iter932).write(oprot);
      }
      xfer += oprot->writeListEnd();
    }
@@@ -4633,10 -4633,10 +4633,10 @@@
    xfer += oprot->writeFieldBegin("foreignKeys", ::apache::thrift::protocol::T_LIST, 3);
    {
      xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>((*(this->foreignKeys)).size()));
-     std::vector<SQLForeignKey> ::const_iterator _iter934;
-     for (_iter934 = (*(this->foreignKeys)).begin(); _iter934 != (*(this->foreignKeys)).end(); ++_iter934)
 -    std::vector<SQLForeignKey> ::const_iterator _iter911;
 -    for (_iter911 = (*(this->foreignKeys)).begin(); _iter911 != (*(this->foreignKeys)).end(); ++_iter911)
++    std::vector<SQLForeignKey> ::const_iterator _iter933;
++    for (_iter933 = (*(this->foreignKeys)).begin(); _iter933 != (*(this->foreignKeys)).end(); ++_iter933)
      {
-       xfer += (*_iter934).write(oprot);
 -      xfer += (*_iter911).write(oprot);
++      xfer += (*_iter933).write(oprot);
      }
      xfer += oprot->writeListEnd();
    }
@@@ -5931,6 -5931,253 +5931,253 @@@ uint32_t ThriftHiveMetastore_drop_table
  }
  
  
+ ThriftHiveMetastore_truncate_table_args::~ThriftHiveMetastore_truncate_table_args() throw() {
+ }
+ 
+ 
+ uint32_t ThriftHiveMetastore_truncate_table_args::read(::apache::thrift::protocol::TProtocol* iprot) {
+ 
+   apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
+   uint32_t xfer = 0;
+   std::string fname;
+   ::apache::thrift::protocol::TType ftype;
+   int16_t fid;
+ 
+   xfer += iprot->readStructBegin(fname);
+ 
+   using ::apache::thrift::protocol::TProtocolException;
+ 
+ 
+   while (true)
+   {
+     xfer += iprot->readFieldBegin(fname, ftype, fid);
+     if (ftype == ::apache::thrift::protocol::T_STOP) {
+       break;
+     }
+     switch (fid)
+     {
+       case 1:
+         if (ftype == ::apache::thrift::protocol::T_STRING) {
+           xfer += iprot->readString(this->dbName);
+           this->__isset.dbName = true;
+         } else {
+           xfer += iprot->skip(ftype);
+         }
+         break;
+       case 2:
+         if (ftype == ::apache::thrift::protocol::T_STRING) {
+           xfer += iprot->readString(this->tableName);
+           this->__isset.tableName = true;
+         } else {
+           xfer += iprot->skip(ftype);
+         }
+         break;
+       case 3:
+         if (ftype == ::apache::thrift::protocol::T_LIST) {
+           {
+             this->partNames.clear();
 -            uint32_t _size912;
 -            ::apache::thrift::protocol::TType _etype915;
 -            xfer += iprot->readListBegin(_etype915, _size912);
 -            this->partNames.resize(_size912);
 -            uint32_t _i916;
 -            for (_i916 = 0; _i916 < _size912; ++_i916)
++            uint32_t _size934;
++            ::apache::thrift::protocol::TType _etype937;
++            xfer += iprot->readListBegin(_etype937, _size934);
++            this->partNames.resize(_size934);
++            uint32_t _i938;
++            for (_i938 = 0; _i938 < _size934; ++_i938)
+             {
 -              xfer += iprot->readString(this->partNames[_i916]);
++              xfer += iprot->readString(this->partNames[_i938]);
+             }
+             xfer += iprot->readListEnd();
+           }
+           this->__isset.partNames = true;
+         } else {
+           xfer += iprot->skip(ftype);
+         }
+         break;
+       default:
+         xfer += iprot->skip(ftype);
+         break;
+     }
+     xfer += iprot->readFieldEnd();
+   }
+ 
+   xfer += iprot->readStructEnd();
+ 
+   return xfer;
+ }
+ 
+ uint32_t ThriftHiveMetastore_truncate_table_args::write(::apache::thrift::protocol::TProtocol* oprot) const {
+   uint32_t xfer = 0;
+   apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot);
+   xfer += oprot->writeStructBegin("ThriftHiveMetastore_truncate_table_args");
+ 
+   xfer += oprot->writeFieldBegin("dbName", ::apache::thrift::protocol::T_STRING, 1);
+   xfer += oprot->writeString(this->dbName);
+   xfer += oprot->writeFieldEnd();
+ 
+   xfer += oprot->writeFieldBegin("tableName", ::apache::thrift::protocol::T_STRING, 2);
+   xfer += oprot->writeString(this->tableName);
+   xfer += oprot->writeFieldEnd();
+ 
+   xfer += oprot->writeFieldBegin("partNames", ::apache::thrift::protocol::T_LIST, 3);
+   {
+     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->partNames.size()));
 -    std::vector<std::string> ::const_iterator _iter917;
 -    for (_iter917 = this->partNames.begin(); _iter917 != this->partNames.end(); ++_iter917)
++    std::vector<std::string> ::const_iterator _iter939;
++    for (_iter939 = this->partNames.begin(); _iter939 != this->partNames.end(); ++_iter939)
+     {
 -      xfer += oprot->writeString((*_iter917));
++      xfer += oprot->writeString((*_iter939));
+     }
+     xfer += oprot->writeListEnd();
+   }
+   xfer += oprot->writeFieldEnd();
+ 
+   xfer += oprot->writeFieldStop();
+   xfer += oprot->writeStructEnd();
+   return xfer;
+ }
+ 
+ 
+ ThriftHiveMetastore_truncate_table_pargs::~ThriftHiveMetastore_truncate_table_pargs() throw() {
+ }
+ 
+ 
+ uint32_t ThriftHiveMetastore_truncate_table_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const {
+   uint32_t xfer = 0;
+   apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot);
+   xfer += oprot->writeStructBegin("ThriftHiveMetastore_truncate_table_pargs");
+ 
+   xfer += oprot->writeFieldBegin("dbName", ::apache::thrift::protocol::T_STRING, 1);
+   xfer += oprot->writeString((*(this->dbName)));
+   xfer += oprot->writeFieldEnd();
+ 
+   xfer += oprot->writeFieldBegin("tableName", ::apache::thrift::protocol::T_STRING, 2);
+   xfer += oprot->writeString((*(this->tableName)));
+   xfer += oprot->writeFieldEnd();
+ 
+   xfer += oprot->writeFieldBegin("partNames", ::apache::thrift::protocol::T_LIST, 3);
+   {
+     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>((*(this->partNames)).size()));
 -    std::vector<std::string> ::const_iterator _iter918;
 -    for (_iter918 = (*(this->partNames)).begin(); _iter918 != (*(this->partNames)).end(); ++_iter918)
++    std::vector<std::string> ::const_iterator _iter940;
++    for (_iter940 = (*(this->partNames)).begin(); _iter940 != (*(this->partNames)).end(); ++_iter940)
+     {
 -      xfer += oprot->writeString((*_iter918));
++      xfer += oprot->writeString((*_iter940));
+     }
+     xfer += oprot->writeListEnd();
+   }
+   xfer += oprot->writeFieldEnd();
+ 
+   xfer += oprot->writeFieldStop();
+   xfer += oprot->writeStructEnd();
+   return xfer;
+ }
+ 
+ 
+ ThriftHiveMetastore_truncate_table_result::~ThriftHiveMetastore_truncate_table_result() throw() {
+ }
+ 
+ 
+ uint32_t ThriftHiveMetastore_truncate_table_result::read(::apache::thrift::protocol::TProtocol* iprot) {
+ 
+   apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
+   uint32_t xfer = 0;
+   std::string fname;
+   ::apache::thrift::protocol::TType ftype;
+   int16_t fid;
+ 
+   xfer += iprot->readStructBegin(fname);
+ 
+   using ::apache::thrift::protocol::TProtocolException;
+ 
+ 
+   while (true)
+   {
+     xfer += iprot->readFieldBegin(fname, ftype, fid);
+     if (ftype == ::apache::thrift::protocol::T_STOP) {
+       break;
+     }
+     switch (fid)
+     {
+       case 1:
+         if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+           xfer += this->o1.read(iprot);
+           this->__isset.o1 = true;
+         } else {
+           xfer += iprot->skip(ftype);
+         }
+         break;
+       default:
+         xfer += iprot->skip(ftype);
+         break;
+     }
+     xfer += iprot->readFieldEnd();
+   }
+ 
+   xfer += iprot->readStructEnd();
+ 
+   return xfer;
+ }
+ 
+ uint32_t ThriftHiveMetastore_truncate_table_result::write(::apache::thrift::protocol::TProtocol* oprot) const {
+ 
+   uint32_t xfer = 0;
+ 
+   xfer += oprot->writeStructBegin("ThriftHiveMetastore_truncate_table_result");
+ 
+   if (this->__isset.o1) {
+     xfer += oprot->writeFieldBegin("o1", ::apache::thrift::protocol::T_STRUCT, 1);
+     xfer += this->o1.write(oprot);
+     xfer += oprot->writeFieldEnd();
+   }
+   xfer += oprot->writeFieldStop();
+   xfer += oprot->writeStructEnd();
+   return xfer;
+ }
+ 
+ 
+ ThriftHiveMetastore_truncate_table_presult::~ThriftHiveMetastore_truncate_table_presult() throw() {
+ }
+ 
+ 
+ uint32_t ThriftHiveMetastore_truncate_table_presult::read(::apache::thrift::protocol::TProtocol* iprot) {
+ 
+   apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
+   uint32_t xfer = 0;
+   std::string fname;
+   ::apache::thrift::protocol::TType ftype;
+   int16_t fid;
+ 
+   xfer += iprot->readStructBegin(fname);
+ 
+   using ::apache::thrift::protocol::TProtocolException;
+ 
+ 
+   while (true)
+   {
+     xfer += iprot->readFieldBegin(fname, ftype, fid);
+     if (ftype == ::apache::thrift::protocol::T_STOP) {
+       break;
+     }
+     switch (fid)
+     {
+       case 1:
+         if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+           xfer += this->o1.read(iprot);
+           this->__isset.o1 = true;
+         } else {
+           xfer += iprot->skip(ftype);
+         }
+         break;
+       default:
+         xfer += iprot->skip(ftype);
+         break;
+     }
+     xfer += iprot->readFieldEnd();
+   }
+ 
+   xfer += iprot->readStructEnd();
+ 
+   return xfer;
+ }
+ 
+ 
  ThriftHiveMetastore_get_tables_args::~ThriftHiveMetastore_get_tables_args() throw() {
  }
  
@@@ -6055,14 -6302,14 +6302,14 @@@ uint32_t ThriftHiveMetastore_get_tables
          if (ftype == ::apache::thrift::protocol::T_LIST) {
            {
              this->success.clear();
-             uint32_t _size935;
-             ::apache::thrift::protocol::TType _etype938;
-             xfer += iprot->readListBegin(_etype938, _size935);
-             this->success.resize(_size935);
-             uint32_t _i939;
-             for (_i939 = 0; _i939 < _size935; ++_i939)
 -            uint32_t _size919;
 -            ::apache::thrift::protocol::TType _etype922;
 -            xfer += iprot->readListBegin(_etype922, _size919);
 -            this->success.resize(_size919);
 -            uint32_t _i923;
 -            for (_i923 = 0; _i923 < _size919; ++_i923)
++            uint32_t _size941;
++            ::apache::thrift::protocol::TType _etype944;
++            xfer += iprot->readListBegin(_etype944, _size941);
++            this->success.resize(_size941);
++            uint32_t _i945;
++            for (_i945 = 0; _i945 < _size941; ++_i945)
              {
-               xfer += iprot->readString(this->success[_i939]);
 -              xfer += iprot->readString(this->success[_i923]);
++              xfer += iprot->readString(this->success[_i945]);
              }
              xfer += iprot->readListEnd();
            }
@@@ -6101,10 -6348,10 +6348,10 @@@ uint32_t ThriftHiveMetastore_get_tables
      xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
      {
        xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->success.size()));
-       std::vector<std::string> ::const_iterator _iter940;
-       for (_iter940 = this->success.begin(); _iter940 != this->success.end(); ++_iter940)
 -      std::vector<std::string> ::const_iterator _iter924;
 -      for (_iter924 = this->success.begin(); _iter924 != this->success.end(); ++_iter924)
++      std::vector<std::string> ::const_iterator _iter946;
++      for (_iter946 = this->success.begin(); _iter946 != this->success.end(); ++_iter946)
        {
-         xfer += oprot->writeString((*_iter940));
 -        xfer += oprot->writeString((*_iter924));
++        xfer += oprot->writeString((*_iter946));
        }
        xfer += oprot->writeListEnd();
      }
@@@ -6149,14 -6396,14 +6396,14 @@@ uint32_t ThriftHiveMetastore_get_tables
          if (ftype == ::apache::thrift::protocol::T_LIST) {
            {
              (*(this->success)).clear();
-             uint32_t _size941;
-             ::apache::thrift::protocol::TType _etype944;
-             xfer += iprot->readListBegin(_etype944, _size941);
-             (*(this->success)).resize(_size941);
-             uint32_t _i945;
-             for (_i945 = 0; _i945 < _size941; ++_i945)
 -            uint32_t _size925;
 -            ::apache::thrift::protocol::TType _etype928;
 -            xfer += iprot->readListBegin(_etype928, _size925);
 -            (*(this->success)).resize(_size925);
 -            uint32_t _i929;
 -            for (_i929 = 0; _i929 < _size925; ++_i929)
++            uint32_t _size947;
++            ::apache::thrift::protocol::TType _etype950;
++            xfer += iprot->readListBegin(_etype950, _size947);
++            (*(this->success)).resize(_size947);
++            uint32_t _i951;
++            for (_i951 = 0; _i951 < _size947; ++_i951)
              {
-               xfer += iprot->readString((*(this->success))[_i945]);
 -              xfer += iprot->readString((*(this->success))[_i929]);
++              xfer += iprot->readString((*(this->success))[_i951]);
              }
              xfer += iprot->readListEnd();
            }
@@@ -6326,14 -6573,14 +6573,14 @@@ uint32_t ThriftHiveMetastore_get_tables
          if (ftype == ::apache::thrift::protocol::T_LIST) {
            {
              this->success.clear();
-             uint32_t _size946;
-             ::apache::thrift::protocol::TType _etype949;
-             xfer += iprot->readListBegin(_etype949, _size946);
-             this->success.resize(_size946);
-             uint32_t _i950;
-             for (_i950 = 0; _i950 < _size946; ++_i950)
 -            uint32_t _size930;
 -            ::apache::thrift::protocol::TType _etype933;
 -            xfer += iprot->readListBegin(_etype933, _size930);
 -            this->success.resize(_size930);
 -            uint32_t _i934;
 -            for (_i934 = 0; _i934 < _size930; ++_i934)
++            uint32_t _size952;
++            ::apache::thrift::protocol::TType _etype955;
++            xfer += iprot->readListBegin(_etype955, _size952);
++            this->success.resize(_size952);
++            uint32_t _i956;
++            for (_i956 = 0; _i956 < _size952; ++_i956)
              {
-               xfer += iprot->readString(this->success[_i950]);
 -              xfer += iprot->readString(this->success[_i934]);
++              xfer += iprot->readString(this->success[_i956]);
              }
              xfer += iprot->readListEnd();
            }
@@@ -6372,10 -6619,10 +6619,10 @@@ uint32_t ThriftHiveMetastore_get_tables
      xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
      {
        xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->success.size()));
-       std::vector<std::string> ::const_iterator _iter951;
-       for (_iter951 = this->success.begin(); _iter951 != this->success.end(); ++_iter951)
 -      std::vector<std::string> ::const_iterator _iter935;
 -      for (_iter935 = this->success.begin(); _iter935 != this->success.end(); ++_iter935)
++      std::vector<std::string> ::const_iterator _iter957;
++      for (_iter957 = this->success.begin(); _iter957 != this->success.end(); ++_iter957)
        {
-         xfer += oprot->writeString((*_iter951));
 -        xfer += oprot->writeString((*_iter935));
++        xfer += oprot->writeString((*_iter957));
        }
        xfer += oprot->writeListEnd();
      }
@@@ -6420,14 -6667,14 +6667,14 @@@ uint32_t ThriftHiveMetastore_get_tables
          if (ftype == ::apache::thrift::protocol::T_LIST) {
            {
              (*(this->success)).clear();
-             uint32_t _size952;
-             ::apache::thrift::protocol::TType _etype955;
-             xfer += iprot->readListBegin(_etype955, _size952);
-             (*(this->success)).resize(_size952);
-             uint32_t _i956;
-             for (_i956 = 0; _i956 < _size952; ++_i956)
 -            uint32_t _size936;
 -            ::apache::thrift::protocol::TType _etype939;
 -            xfer += iprot->readListBegin(_etype939, _size936);
 -            (*(this->success)).resize(_size936);
 -            uint32_t _i940;
 -            for (_i940 = 0; _i940 < _size936; ++_i940)
++            uint32_t _size958;
++            ::apache::thrift::protocol::TType _etype961;
++            xfer += iprot->readListBegin(_etype961, _size958);
++            (*(this->success)).resize(_size958);
++            uint32_t _i962;
++            for (_i962 = 0; _i962 < _size958; ++_i962)
              {
-               xfer += iprot->readString((*(this->success))[_i956]);
 -              xfer += iprot->readString((*(this->success))[_i940]);
++              xfer += iprot->readString((*(this->success))[_i962]);
              }
              xfer += iprot->readListEnd();
            }
@@@ -6502,14 -6749,14 +6749,14 @@@ uint32_t ThriftHiveMetastore_get_table_
          if (ftype == ::apache::thrift::protocol::T_LIST) {
            {
              this->tbl_types.clear();
-             uint32_t _size957;
-             ::apache::thrift::protocol::TType _etype960;
-             xfer += iprot->readListBegin(_etype960, _size957);
-             this->tbl_types.resize(_size957);
-             uint32_t _i961;
-             for (_i961 = 0; _i961 < _size957; ++_i961)
 -            uint32_t _size941;
 -            ::apache::thrift::protocol::TType _etype944;
 -            xfer += iprot->readListBegin(_etype944, _size941);
 -            this->tbl_types.resize(_size941);
 -            uint32_t _i945;
 -            for (_i945 = 0; _i945 < _size941; ++_i945)
++            uint32_t _size963;
++            ::apache::thrift::protocol::TType _etype966;
++            xfer += iprot->readListBegin(_etype966, _size963);
++            this->tbl_types.resize(_size963);
++            uint32_t _i967;
++            for (_i967 = 0; _i967 < _size963; ++_i967)
              {
-               xfer += iprot->readString(this->tbl_types[_i961]);
 -              xfer += iprot->readString(this->tbl_types[_i945]);
++              xfer += iprot->readString(this->tbl_types[_i967]);
              }
              xfer += iprot->readListEnd();
            }
@@@ -6546,10 -6793,10 +6793,10 @@@ uint32_t ThriftHiveMetastore_get_table_
    xfer += oprot->writeFieldBegin("tbl_types", ::apache::thrift::protocol::T_LIST, 3);
    {
      xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->tbl_types.size()));
-     std::vector<std::string> ::const_iterator _iter962;
-     for (_iter962 = this->tbl_types.begin(); _iter962 != this->tbl_types.end(); ++_iter962)
 -    std::vector<std::string> ::const_iterator _iter946;
 -    for (_iter946 = this->tbl_types.begin(); _iter946 != this->tbl_types.end(); ++_iter946)
++    std::vector<std::string> ::const_iterator _iter968;
++    for (_iter968 = this->tbl_types.begin(); _iter968 != this->tbl_types.end(); ++_iter968)
      {
-       xfer += oprot->writeString((*_iter962));
 -      xfer += oprot->writeString((*_iter946));
++      xfer += oprot->writeString((*_iter968));
      }
      xfer += oprot->writeListEnd();
    }
@@@ -6581,10 -6828,10 +6828,10 @@@ uint32_t ThriftHiveMetastore_get_table_
    xfer += oprot->writeFieldBegin("tbl_types", ::apache::thrift::protocol::T_LIST, 3);
    {
      xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>((*(this->tbl_types)).size()));
-     std::vector<std::string> ::const_iterator _iter963;
-     for (_iter963 = (*(this->tbl_types)).begin(); _iter963 != (*(this->tbl_types)).end(); ++_iter963)
 -    std::vector<std::string> ::const_iterator _iter947;
 -    for (_iter947 = (*(this->tbl_types)).begin(); _iter947 != (*(this->tbl_types)).end(); ++_iter947)
++    std::vector<std::string> ::const_iterator _iter969;
++    for (_iter969 = (*(this->tbl_types)).begin(); _iter969 != (*(this->tbl_types)).end(); ++_iter969)
      {
-       xfer += oprot->writeString((*_iter963));
 -      xfer += oprot->writeString((*_iter947));
++      xfer += oprot->writeString((*_iter969));
      }
      xfer += oprot->writeListEnd();
    }
@@@ -6625,14 -6872,14 +6872,14 @@@ uint32_t ThriftHiveMetastore_get_table_
          if (ftype == ::apache::thrift::protocol::T_LIST) {
            {
              this->success.clear();
-             uint32_t _size964;
-             ::apache::thrift::protocol::TType _etype967;
-             xfer += iprot->readListBegin(_etype967, _size964);
-             this->success.resize(_size964);
-             uint32_t _i968;
-             for (_i968 = 0; _i968 < _size964; ++_i968)
 -            uint32_t _size948;
 -            ::apache::thrift::protocol::TType _etype951;
 -            xfer += iprot->readListBegin(_etype951, _size948);
 -            this->success.resize(_size948);
 -            uint32_t _i952;
 -            for (_i952 = 0; _i952 < _size948; ++_i952)
++            uint32_t _size970;
++            ::apache::thrift::protocol::TType _etype973;
++            xfer += iprot->readListBegin(_etype973, _size970);
++            this->success.resize(_size970);
++            uint32_t _i974;
++            for (_i974 = 0; _i974 < _size970; ++_i974)
              {
-               xfer += this->success[_i968].read(iprot);
 -              xfer += this->success[_i952].read(iprot);
++              xfer += this->success[_i974].read(iprot);
              }
              xfer += iprot->readListEnd();
            }
@@@ -6671,10 -6918,10 +6918,10 @@@ uint32_t ThriftHiveMetastore_get_table_
      xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
      {
        xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->success.size()));
-       std::vector<TableMeta> ::const_iterator _iter969;
-       for (_iter969 = this->success.begin(); _iter969 != this->success.end(); ++_iter969)
 -      std::vector<TableMeta> ::const_iterator _iter953;
 -      for (_iter953 = this->success.begin(); _iter953 != this->success.end(); ++_iter953)
++      std::vector<TableMeta> ::const_iterator _iter975;
++      for (_iter975 = this->success.begin(); _iter975 != this->success.end(); ++_iter975)
        {
-         xfer += (*_iter969).write(oprot);
 -        xfer += (*_iter953).write(oprot);
++        xfer += (*_iter975).write(oprot);
        }
        xfer += oprot->writeListEnd();
      }
@@@ -6719,14 -6966,14 +6966,14 @@@ uint32_t ThriftHiveMetastore_get_table_
          if (ftype == ::apache::thrift::protocol::T_LIST) {
            {
              (*(this->success)).clear();
-             uint32_t _size970;
-             ::apache::thrift::protocol::TType _etype973;
-             xfer += iprot->readListBegin(_etype973, _size970);
-             (*(this->success)).resize(_size970);
-             uint32_t _i974;
-             for (_i974 = 0; _i974 < _size970; ++_i974)
 -            uint32_t _size954;
 -            ::apache::thrift::protocol::TType _etype957;
 -            xfer += iprot->readListBegin(_etype957, _size954);
 -            (*(this->success)).resize(_size954);
 -            uint32_t _i958;
 -            for (_i958 = 0; _i958 < _size954; ++_i958)
++            uint32_t _size976;
++            ::apache::thrift::protocol::TType _etype979;
++            xfer += iprot->readListBegin(_etype979, _size976);
++            (*(this->success)).resize(_size976);
++            uint32_t _i980;
++            for (_i980 = 0; _i980 < _size976; ++_i980)
              {
-               xfer += (*(this->success))[_i974].read(iprot);
 -              xfer += (*(this->success))[_i958].read(iprot);
++              xfer += (*(this->success))[_i980].read(iprot);
              }
              xfer += iprot->readListEnd();
            }
@@@ -6864,14 -7111,14 +7111,14 @@@ uint32_t ThriftHiveMetastore_get_all_ta
          if (ftype == ::apache::thrift::protocol::T_LIST) {
            {
              this->success.clear();
-             uint32_t _size975;
-             ::apache::thrift::protocol::TType _etype978;
-             xfer += iprot->readListBegin(_etype978, _size975);
-             this->success.resize(_size975);
-             uint32_t _i979;
-             for (_i979 = 0; _i979 < _size975; ++_i979)
 -            uint32_t _size959;
 -            ::apache::thrift::protocol::TType _etype962;
 -            xfer += iprot->readListBegin(_etype962, _size959);
 -            this->success.resize(_size959);
 -            uint32_t _i963;
 -            for (_i963 = 0; _i963 < _size959; ++_i963)
++            uint32_t _size981;
++            ::apache::thrift::protocol::TType _etype984;
++            xfer += iprot->readListBegin(_etype984, _size981);
++            this->success.resize(_size981);
++            uint32_t _i985;
++            for (_i985 = 0; _i985 < _size981; ++_i985)
              {
-               xfer += iprot->readString(this->success[_i979]);
 -              xfer += iprot->readString(this->success[_i963]);
++              xfer += iprot->readString(this->success[_i985]);
              }
              xfer += iprot->readListEnd();
            }
@@@ -6910,10 -7157,10 +7157,10 @@@ uint32_t ThriftHiveMetastore_get_all_ta
      xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
      {
        xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->success.size()));
-       std::vector<std::string> ::const_iterator _iter980;
-       for (_iter980 = this->success.begin(); _iter980 != this->success.end(); ++_iter980)
 -      std::vector<std::string> ::const_iterator _iter964;
 -      for (_iter964 = this->success.begin(); _iter964 != this->success.end(); ++_iter964)
++      std::vector<std::string> ::const_iterator _iter986;
++      for (_iter986 = this->success.begin(); _iter986 != this->success.end(); ++_iter986)
        {
-         xfer += oprot->writeString((*_iter980));
 -        xfer += oprot->writeString((*_iter964));
++        xfer += oprot->writeString((*_iter986));
        }
        xfer += oprot->writeListEnd();
      }
@@@ -6958,14 -7205,14 +7205,14 @@@ uint32_t ThriftHiveMetastore_get_all_ta
          if (ftype == ::apache::thrift::protocol::T_LIST) {
            {
              (*(this->success)).clear();
-             uint32_t _size981;
-             ::apache::thrift::protocol::TType _etype984;
-             xfer += iprot->readListBegin(_etype984, _size981);
-             (*(this->success)).resize(_size981);
-             uint32_t _i985;
-             for (_i985 = 0; _i985 < _size981; ++_i985)
 -            uint32_t _size965;
 -            ::apache::thrift::protocol::TType _etype968;
 -            xfer += iprot->readListBegin(_etype968, _size965);
 -            (*(this->success)).resize(_size965);
 -            uint32_t _i969;
 -            for (_i969 = 0; _i969 < _size965; ++_i969)
++            uint32_t _size987;
++            ::apache::thrift::protocol::TType _etype990;
++            xfer += iprot->readListBegin(_etype990, _size987);
++            (*(this->success)).resize(_size987);
++            uint32_t _i991;
++            for (_i991 = 0; _i991 < _size987; ++_i991)
              {
-               xfer += iprot->readString((*(this->success))[_i985]);
 -              xfer += iprot->readString((*(this->success))[_i969]);
++              xfer += iprot->readString((*(this->success))[_i991]);
              }
              xfer += iprot->readListEnd();
            }
@@@ -7275,14 -7522,14 +7522,14 @@@ uint32_t ThriftHiveMetastore_get_table_
          if (ftype == ::apache::thrift::protocol::T_LIST) {
            {
              this->tbl_names.clear();
-             uint32_t _size986;
-             ::apache::thrift::protocol::TType _etype989;
-             xfer += iprot->readListBegin(_etype989, _size986);
-             this->tbl_names.resize(_size986);
-             uint32_t _i990;
-             for (_i990 = 0; _i990 < _size986; ++_i990)
 -            uint32_t _size970;
 -            ::apache::thrift::protocol::TType _etype973;
 -            xfer += iprot->readListBegin(_etype973, _size970);
 -            this->tbl_names.resize(_size970);
 -            uint32_t _i974;
 -            for (_i974 = 0; _i974 < _size970; ++_i974)
++            uint32_t _size992;
++            ::apache::thrift::protocol::TType _etype995;
++            xfer += iprot->readListBegin(_etype995, _size992);
++            this->tbl_names.resize(_size992);
++            uint32_t _i996;
++            for (_i996 = 0; _i996 < _size992; ++_i996)
              {
-               xfer += iprot->readString(this->tbl_names[_i990]);
 -              xfer += iprot->readString(this->tbl_names[_i974]);
++              xfer += iprot->readString(this->tbl_names[_i996]);
              }
              xfer += iprot->readListEnd();
            }
@@@ -7315,10 -7562,10 +7562,10 @@@ uint32_t ThriftHiveMetastore_get_table_
    xfer += oprot->writeFieldBegin("tbl_names", ::apache::thrift::protocol::T_LIST, 2);
    {
      xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->tbl_names.size()));
-     std::vector<std::string> ::const_iterator _iter991;
-     for (_iter991 = this->tbl_names.begin(); _iter991 != this->tbl_names.end(); ++_iter991)
 -    std::vector<std::string> ::const_iterator _iter975;
 -    for (_iter975 = this->tbl_names.begin(); _iter975 != this->tbl_names.end(); ++_iter975)
++    std::vector<std::string> ::const_iterator _iter997;
++    for (_iter997 = this->tbl_names.begin(); _iter997 != this->tbl_names.end(); ++_iter997)
      {
-       xfer += oprot->writeString((*_iter991));
 -      xfer += oprot->writeString((*_iter975));
++      xfer += oprot->writeString((*_iter997));
      }
      xfer += oprot->writeListEnd();
    }
@@@ -7346,10 -7593,10 +7593,10 @@@ uint32_t ThriftHiveMetastore_get_table_
    xfer += oprot->writeFieldBegin("tbl_names", ::apache::thrift::protocol::T_LIST, 2);
    {
      xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>((*(this->tbl_names)).size()));
-     std::vector<std::string> ::const_iterator _iter992;
-     for (_iter992 = (*(this->tbl_names)).begin(); _iter992 != (*(this->tbl_names)).end(); ++_iter992)
 -    std::vector<std::string> ::const_iterator _iter976;
 -    for (_iter976 = (*(this->tbl_names)).begin(); _iter976 != (*(this->tbl_names)).end(); ++_iter976)
++    std::vector<std::string> ::const_iterator _iter998;
++    for (_iter998 = (*(this->tbl_names)).begin(); _iter998 != (*(this->tbl_names)).end(); ++_iter998)
      {
-       xfer += oprot->writeString((*_iter992));
 -      xfer += oprot->writeString((*_iter976));
++      xfer += oprot->writeString((*_iter998));
      }
      xfer += oprot->writeListEnd();
    }
@@@ -7390,14 -7637,14 +7637,14 @@@ uint32_t ThriftHiveMetastore_get_table_
          if (ftype == ::apache::thrift::protocol::T_LIST) {
            {
              this->success.clear();
-             uint32_t _size993;
-             ::apache::thrift::protocol::TType _etype996;
-             xfer += iprot->readListBegin(_etype996, _size993);
-             this->success.resize(_size993);
-             uint32_t _i997;
-             for (_i997 = 0; _i997 < _size993; ++_i997)
 -            uint32_t _size977;
 -            ::apache::thrift::protocol::TType _etype980;
 -            xfer += iprot->readListBegin(_etype980, _size977);
 -            this->success.resize(_size977);
 -            uint32_t _i981;
 -            for (_i981 = 0; _i981 < _size977; ++_i981)
++            uint32_t _size999;
++            ::apache::thrift::protocol::TType _etype1002;
++            xfer += iprot->readListBegin(_etype1002, _size999);
++            this->success.resize(_size999);
++            uint32_t _i1003;
++            for (_i1003 = 0; _i1003 < _size999; ++_i1003)
              {
-               xfer += this->success[_i997].read(iprot);
 -              xfer += this->success[_i981].read(iprot);
++              xfer += this->success[_i1003].read(iprot);
              }
              xfer += iprot->readListEnd();
            }
@@@ -7428,10 -7675,10 +7675,10 @@@ uint32_t ThriftHiveMetastore_get_table_
      xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
      {
        xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->success.size()));
-       std::vector<Table> ::const_iterator _iter998;
-       for (_iter998 = this->success.begin(); _iter998 != this->success.end(); ++_iter998)
 -      std::vector<Table> ::const_iterator _iter982;
 -      for (_iter982 = this->success.begin(); _iter982 != this->success.end(); ++_iter982)
++      std::vector<Table> ::const_iterator _iter1004;
++      for (_iter1004 = this->success.begin(); _iter1004 != this->success.end(); ++_iter1004)
        {
-         xfer += (*_iter998).write(oprot);
 -        xfer += (*_iter982).write(oprot);
++        xfer += (*_iter1004).write(oprot);
        }
        xfer += oprot->writeListEnd();
      }
@@@ -7472,14 -7719,14 +7719,14 @@@ uint32_t ThriftHiveMetastore_get_table_
          if (ftype == ::apache::thrift::protocol::T_LIST) {
            {
              (*(this->success)).clear();
-             uint32_t _size999;
-             ::apache::thrift::protocol::TType _etype1002;
-             xfer += iprot->readListBegin(_etype1002, _size999);
-             (*(this->success)).resize(_size999);
-             uint32_t _i1003;
-             for (_i1003 = 0; _i1003 < _size999; ++_i1003)
 -            uint32_t _size983;
 -            ::apache::thrift::protocol::TType _etype986;
 -            xfer += iprot->readListBegin(_etype986, _size983);
 -            (*(this->success)).resize(_size983);
 -            uint32_t _i987;
 -            for (_i987 = 0; _i987 < _size983; ++_i987)
++            uint32_t _size1005;
++            ::apache::thrift::protocol::TType _etype1008;
++            xfer += iprot->readListBegin(_etype1008, _size1005);
++            (*(this->success)).resize(_size1005);
++            uint32_t _i1009;
++            for (_i1009 = 0; _i1009 < _size1005; ++_i1009)
              {
-               xfer += (*(this->success))[_i1003].read(iprot);
 -              xfer += (*(this->success))[_i987].read(iprot);
++              xfer += (*(this->success))[_i1009].read(iprot);
              }
              xfer += iprot->readListEnd();
            }
@@@ -8115,14 -8362,14 +8362,14 @@@ uint32_t ThriftHiveMetastore_get_table_
          if (ftype == ::apache::thrift::protocol::T_LIST) {
            {
              this->success.clear();
-             uint32_t _size1004;
-             ::apache::thrift::protocol::TType _etype1007;
-             xfer += iprot->readListBegin(_etype1007, _size1004);
-             this->success.resize(_size1004);
-             uint32_t _i1008;
-             for (_i1008 = 0; _i1008 < _size1004; ++_i1008)
 -            uint32_t _size988;
 -            ::apache::thrift::protocol::TType _etype991;
 -            xfer += iprot->readListBegin(_etype991, _size988);
 -            this->success.resize(_size988);
 -            uint32_t _i992;
 -            for (_i992 = 0; _i992 < _size988; ++_i992)
++            uint32_t _size1010;
++            ::apache::thrift::protocol::TType _etype1013;
++            xfer += iprot->readListBegin(_etype1013, _size1010);
++            this->success.resize(_size1010);
++            uint32_t _i1014;
++            for (_i1014 = 0; _i1014 < _size1010; ++_i1014)
              {
-               xfer += iprot->readString(this->success[_i1008]);
 -              xfer += iprot->readString(this->success[_i992]);
++              xfer += iprot->readString(this->success[_i1014]);
              }
              xfer += iprot->readListEnd();
            }
@@@ -8177,10 -8424,10 +8424,10 @@@ uint32_t ThriftHiveMetastore_get_table_
      xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
      {
        xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->success.size()));
-       std::vector<std::string> ::const_iterator _iter1009;
-       for (_iter1009 = this->success.begin(); _iter1009 != this->success.end(); ++_iter1009)
 -      std::vector<std::string> ::const_iterator _iter993;
 -      for (_iter993 = this->success.begin(); _iter993 != this->success.end(); ++_iter993)
++      std::vector<std::string> ::const_iterator _iter1015;
++      for (_iter1015 = this->success.begin(); _iter1015 != this->success.end(); ++_iter1015)
        {
-         xfer += oprot->writeString((*_iter1009));
 -        xfer += oprot->writeString((*_iter993));
++        xfer += oprot->writeString((*_iter1015));
        }
        xfer += oprot->writeListEnd();
      }
@@@ -8233,14 -8480,14 +8480,14 @@@ uint32_t ThriftHiveMetastore_get_table_
          if (ftype == ::apache::thrift::protocol::T_LIST) {
            {
              (*(this->success)).clear();
-             uint32_t _size1010;
-             ::apache::thrift::protocol::TType _etype1013;
-             xfer += iprot->readListBegin(_etype1013, _size1010);
-             (*(this->success)).resize(_size1010);
-             uint32_t _i1014;
-             for (_i1014 = 0; _i1014 < _size1010; ++_i1014)
 -            uint32_t _size994;
 -            ::apache::thrift::protocol::TType _etype997;
 -            xfer += iprot->readListBegin(_etype997, _size994);
 -            (*(this->success)).resize(_size994);
 -            uint32_t _i998;
 -            for (_i998 = 0; _i998 < _size994; ++_i998)
++            uint32_t _size1016;
++            ::apache::thrift::protocol::TType _etype1019;
++            xfer += iprot->readListBegin(_etype1019, _size1016);
++            (*(this->success)).resize(_size1016);
++            uint32_t _i1020;
++            for (_i1020 = 0; _i1020 < _size1016; ++_i1020)
              {
-               xfer += iprot->readString((*(this->success))[_i1014]);
 -              xfer += iprot->readString((*(this->success))[_i998]);
++              xfer += iprot->readString((*(this->success))[_i1020]);
              }
              xfer += iprot->readListEnd();
            }
@@@ -9574,14 -9821,14 +9821,14 @@@ uint32_t ThriftHiveMetastore_add_partit
          if (ftype == ::apache::thrift::protocol::T_LIST) {
            {
              this->new_parts.clear();
-             uint32_t _size1015;
-             ::apache::thrift::protocol::TType _etype1018;
-             xfer += iprot->readListBegin(_etype1018, _size1015);
-             this->new_parts.resize(_size1015);
-             uint32_t _i1019;
-             for (_i1019 = 0; _i1019 < _size1015; ++_i1019)
 -            uint32_t _size999;
 -            ::apache::thrift::protocol::TType _etype1002;
 -            xfer += iprot->readListBegin(_etype1002, _size999);
 -            this->new_parts.resize(_size999);
 -            uint32_t _i1003;
 -            for (_i1003 = 0; _i1003 < _size999; ++_i1003)
++            uint32_t _size1021;
++            ::apache::thrift::protocol::TType _etype1024;
++            xfer += iprot->readListBegin(_etype1024, _size1021);
++            this->new_parts.resize(_size1021);
++            uint32_t _i1025;
++            for (_i1025 = 0; _i1025 < _size1021; ++_i1025)
              {
-               xfer += this->new_parts[_i1019].read(iprot);
 -              xfer += this->new_parts[_i1003].read(iprot);
++              xfer += this->new_parts[_i1025].read(iprot);
              }
              xfer += iprot->readListEnd();
            }
@@@ -9610,10 -9857,10 +9857,10 @@@ uint32_t ThriftHiveMetastore_add_partit
    xfer += oprot->writeFieldBegin("new_parts", ::apache::thrift::protocol::T_LIST, 1);
    {
      xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->new_parts.size()));
-     std::vector<Partition> ::const_iterator _iter1020;
-     for (_iter1020 = this->new_parts.begin(); _iter1020 != this->new_parts.end(); ++_iter1020)
 -    std::vector<Partition> ::const_iterator _iter1004;
 -    for (_iter1004 = this->new_parts.begin(); _iter1004 != this->new_parts.end(); ++_iter1004)
++    std::vector<Partition> ::const_iterator _iter1026;
++    for (_iter1026 = this->new_parts.begin(); _iter1026 != this->new_parts.end(); ++_iter1026)
      {
-       xfer += (*_iter1020).write(oprot);
 -      xfer += (*_iter1004).write(oprot);
++      xfer += (*_iter1026).write(oprot);
      }
      xfer += oprot->writeListEnd();
    }
@@@ -9637,10 -9884,10 +9884,10 @@@ uint32_t ThriftHiveMetastore_add_partit
    xfer += oprot->writeFieldBegin("new_parts", ::apache::thrift::protocol::T_LIST, 1);
    {
      xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>((*(this->new_parts)).size()));
-     std::vector<Partition> ::const_iterator _iter1021;
-     for (_iter1021 = (*(this->new_parts)).begin(); _iter1021 != (*(this->new_parts)).end(); ++_iter1021)
 -    std::vector<Partition> ::const_iterator _iter1005;
 -    for (_iter1005 = (*(this->new_parts)).begin(); _iter1005 != (*(this->new_parts)).end(); ++_iter1005)
++    std::vector<Partition> ::const_iterator _iter1027;
++    for (_iter1027 = (*(this->new_parts)).begin(); _iter1027 != (*(this->new_parts)).end(); ++_iter1027)
      {
-       xfer += (*_iter1021).write(oprot);
 -      xfer += (*_iter1005).write(oprot);
++      xfer += (*_iter1027).write(oprot);
      }
      xfer += oprot->writeListEnd();
    }
@@@ -9849,14 -10096,14 +10096,14 @@@ uint32_t ThriftHiveMetastore_add_partit
          if (ftype == ::apache::thrift::protocol::T_LIST) {
            {
              this->new_parts.clear();
-             uint32_t _size1022;
-             ::apache::thrift::protocol::TType _etype1025;
-             xfer += iprot->readListBegin(_etype1025, _size1022);
-             this->new_parts.resize(_size1022);
-             uint32_t _i1026;
-             for (_i1026 = 0; _i1026 < _size1022; ++_i1026)
 -            uint32_t _size1006;
 -            ::apache::thrift::protocol::TType _etype1009;
 -            xfer += iprot->readListBegin(_etype1009, _size1006);
 -            this->new_parts.resize(_size1006);
 -            uint32_t _i1010;
 -            for (_i1010 = 0; _i1010 < _size1006; ++_i1010)
++            uint32_t _size1028;
++            ::apache::thrift::protocol::TType _etype1031;
++            xfer += iprot->readListBegin(_etype1031, _size1028);
++            this->new_parts.resize(_size1028);
++            uint32_t _i1032;
++            for (_i1032 = 0; _i1032 < _size1028; ++_i1032)
              {
-               xfer += this->new_parts[_i1026].read(iprot);
 -              xfer += this->new_parts[_i1010].read(iprot);
++              xfer += this->new_parts[_i1032].read(iprot);
              }
              xfer += iprot->readListEnd();
            }
@@@ -9885,10 -10132,10 +10132,10 @@@ uint32_t ThriftHiveMetastore_add_partit
    xfer += oprot->writeFieldBegin("new_parts", ::apache::thrift::protocol::T_LIST, 1);
    {
      xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->new_parts.size()));
-     std::vector<PartitionSpec> ::const_iterator _iter1027;
-     for (_iter1027 = this->new_parts.begin(); _iter1027 != this->new_parts.end(); ++_iter1027)
 -    std::vector<PartitionSpec> ::const_iterator _iter1011;
 -    for (_iter1011 = this->new_parts.begin(); _iter1011 != this->new_parts.end(); ++_iter1011)
++    std::vector<PartitionSpec> ::const_iterator _iter1033;
++    for (_iter1033 = this->new_parts.begin(); _iter1033 != this->new_parts.end(); ++_iter1033)
      {
-       xfer += (*_iter1027).write(oprot);
 -      xfer += (*_iter1011).write(oprot);
++      xfer += (*_iter1033).write(oprot);
      }
      xfer += oprot->writeListEnd();
    }
@@@ -9912,10 -10159,10 +10159,10 @@@ uint32_t ThriftHiveMetastore_add_partit
    xfer += oprot->writeFieldBegin("new_parts", ::apache::thrift::protocol::T_LIST, 1);
    {
      xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>((*(this->new_parts)).size()));
-     std::vector<PartitionSpec> ::const_iterator _iter1028;
-     for (_iter1028 = (*(this->new_parts)).begin(); _iter1028 != (*(this->new_parts)).end(); ++_iter1028)
 -    std::vector<PartitionSpec> ::const_iterator _iter1012;
 -    for (_iter1012 = (*(this->new_parts)).begin(); _iter1012 != (*(this->new_parts)).end(); ++_iter1012)
++    std::vector<PartitionSpec> ::const_iterator _iter1034;
++    for (_iter1034 = (*(this->new_parts)).begin(); _iter1034 != (*(this->new_parts)).end(); ++_iter1034)
      {
-       xfer += (*_iter1028).write(oprot);
 -      xfer += (*_iter1012).write(oprot);
++      xfer += (*_iter1034).write(oprot);
      }
      xfer += oprot->writeListEnd();
    }
@@@ -10140,14 -10387,14 +10387,14 @@@ uint32_t ThriftHiveMetastore_append_par
          if (ftype == ::apache::thrift::protocol::T_LIST) {
            {
              this->part_vals.clear();
-             uint32_t _size1029;
-             ::apache::thrift::protocol::TType _etype1032;
-             xfer += iprot->readListBegin(_etype1032, _size1029);
-             this->part_vals.resize(_size1029);
-             uint32_t _i1033;
-             for (_i1033 = 0; _i1033 < _size1029; ++_i1033)
 -            uint32_t _size1013;
 -            ::apache::thrift::protocol::TType _etype1016;
 -            xfer += iprot->readListBegin(_etype1016, _size1013);
 -            this->part_vals.resize(_size1013);
 -            uint32_t _i1017;
 -            for (_i1017 = 0; _i1017 < _size1013; ++_i1017)
++            uint32_t _size1035;
++            ::apache::thrift::protocol::TType _etype1038;
++            xfer += iprot->readListBegin(_etype1038, _size1035);
++            this->part_vals.resize(_size1035);
++            uint32_t _i1039;
++            for (_i1039 = 0; _i1039 < _size1035; ++_i1039)
              {
-               xfer += iprot->readString(this->part_vals[_i1033]);
 -              xfer += iprot->readString(this->part_vals[_i1017]);
++              xfer += iprot->readString(this->part_vals[_i1039]);
              }
              xfer += iprot->readListEnd();
            }
@@@ -10184,10 -10431,10 +10431,10 @@@ uint32_t ThriftHiveMetastore_append_par
    xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3);
    {
      xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->part_vals.size()));
-     std::vector<std::string> ::const_iterator _iter1034;
-     for (_iter1034 = this->part_vals.begin(); _iter1034 != this->part_vals.end(); ++_iter1034)
 -    std::vector<std::string> ::const_iterator _iter1018;
 -    for (_iter1018 = this->part_vals.begin(); _iter1018 != this->part_vals.end(); ++_iter1018)
++    std::vector<std::string> ::const_iterator _iter1040;
++    for (_iter1040 = this->part_vals.begin(); _iter1040 != this->part_vals.end(); ++_iter1040)
      {
-       xfer += oprot->writeString((*_iter1034));
 -      xfer += oprot->writeString((*_iter1018));
++      xfer += oprot->writeString((*_iter1040));
      }
      xfer += oprot->writeListEnd();
    }
@@@ -10219,10 -10466,10 +10466,10 @@@ uint32_t ThriftHiveMetastore_append_par
    xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3);
    {
      xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>((*(this->part_vals)).size()));
-     std::vector<std::string> ::const_iterator _iter1035;
-     for (_iter1035 = (*(this->part_vals)).begin(); _iter1035 != (*(this->part_vals)).end(); ++_iter1035)
 -    std::vector<std::string> ::const_iterator _iter1019;
 -    for (_iter1019 = (*(this->part_vals)).begin(); _iter1019 != (*(this->part_vals)).end(); ++_iter1019)
++    std::vector<std::string> ::const_iterator _iter1041;
++    for (_iter1041 = (*(this->part_vals)).begin(); _iter1041 != (*(this->part_vals)).end(); ++_iter1041)
      {
-       xfer += oprot->writeString((*_iter1035));
 -      xfer += oprot->writeString((*_iter1019));
++      xfer += oprot->writeString((*_iter1041));
      }
      xfer += oprot->writeListEnd();
    }
@@@ -10694,14 -10941,14 +10941,14 @@@ uint32_t ThriftHiveMetastore_append_par
          if (ftype == ::apache::thrift::protocol::T_LIST) {
            {
              this->part_vals.clear();
-             uint32_t _size1036;
-             ::apache::thrift::protocol::TType _etype1039;
-             xfer += iprot->readListBegin(_etype1039, _size1036);
-             this->part_vals.resize(_size1036);
-             uint32_t _i1040;
-             for (_i1040 = 0; _i1040 < _size1036; ++_i1040)
 -            uint32_t _size1020;
 -            ::apache::thrift::protocol::TType _etype1023;
 -            xfer += iprot->readListBegin(_etype1023, _size1020);
 -            this->part_vals.resize(_size1020);
 -            uint32_t _i1024;
 -            for (_i1024 = 0; _i1024 < _size1020; ++_i1024)
++            uint32_t _size1042;
++            ::apache::thrift::protocol::TType _etype1045;
++            xfer += iprot->readListBegin(_etype1045, _size1042);
++            this->part_vals.resize(_size1042);
++            uint32_t _i1046;
++            for (_i1046 = 0; _i1046 < _size1042; ++_i1046)
              {
-               xfer += iprot->readString(this->part_vals[_i1040]);
 -              xfer += iprot->readString(this->part_vals[_i1024]);
++              xfer += iprot->readString(this->part_vals[_i1046]);
              }
              xfer += iprot->readListEnd();
            }
@@@ -10746,10 -10993,10 +10993,10 @@@ uint32_t ThriftHiveMetastore_append_par
    xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3);
    {
      xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->part_vals.size()));
-     std::vector<std::string> ::const_iterator _iter1041;
-     for (_iter1041 = this->part_vals.begin(); _iter1041 != this->part_vals.end(); ++_iter1041)
 -    std::vector<std::string> ::const_iterator _iter1025;
 -    for (_iter1025 = this->part_vals.begin(); _iter1025 != this->part_vals.end(); ++_iter1025)
++    std::vector<std::string> ::const_iterator _iter1047;
++    for (_iter1047 = this->part_vals.begin(); _iter1047 != this->part_vals.end(); ++_iter1047)
      {
-       xfer += oprot->writeString((*_iter1041));
 -      xfer += oprot->writeString((*_iter1025));
++      xfer += oprot->writeString((*_iter1047));
      }
      xfer += oprot->writeListEnd();
    }
@@@ -10785,10 -11032,10 +11032,10 @@@ uint32_t ThriftHiveMetastore_append_par
    xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3);
    {
      xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>((*(this->part_vals)).size()));
-     std::vector<std::string> ::const_iterator _iter1042;
-     for (_iter1042 = (*(this->part_vals)).begin(); _iter1042 != (*(this->part_vals)).end(); ++_iter1042)
 -    std::vector<std::string> ::const_iterator _iter1026;
 -    for (_iter1026 = (*(this->part_vals)).begin(); _iter1026 != (*(this->part_vals)).end(); ++_iter1026)
++    std::vector<std::string> ::const_iterator _iter1048;
++    for (_iter1048 = (*(this->part_vals)).begin(); _iter1048 != (*(this->part_vals)).end(); ++_iter1048)
      {
-       xfer += oprot->writeString((*_iter1042));
 -      xfer += oprot->writeString((*_iter1026));
++      xfer += oprot->writeString((*_iter1048));
      }
      xfer += oprot->writeListEnd();
    }
@@@ -11591,14 -11838,14 +11838,14 @@@ uint32_t ThriftHiveMetastore_drop_parti
          if (ftype == ::apache::thrift::protocol::T_LIST) {
            {
              this->part_vals.clear();
-             uint32_t _size1043;
-             ::apache::thrift::protocol::TType _etype1046;
-             xfer += iprot->readListBegin(_etype1046, _size1043);
-             this->part_vals.resize(_size1043);
-             uint32_t _i1047;
-             for (_i1047 = 0; _i1047 < _size1043; ++_i1047)
 -            uint32_t _size1027;
 -            ::apache::thrift::protocol::TType _etype1030;
 -            xfer += iprot->readListBegin(_etype1030, _size1027);
 -            this->part_vals.resize(_size1027);
 -            uint32_t _i1031;
 -            for (_i1031 = 0; _i1031 < _size1027; ++_i1031)
++            uint32_t _size1049;
++            ::apache::thrift::protocol::TType _etype1052;
++            xfer += iprot->readListBegin(_etype1052, _size1049);
++            this->part_vals.resize(_size1049);
++            uint32_t _i1053;
++            for (_i1053 = 0; _i1053 < _size1049; ++_i1053)
              {
-               xfer += iprot->readString(this->part_vals[_i1047]);
 -              xfer += iprot->readString(this->part_vals[_i1031]);
++              xfer += iprot->readString(this->part_vals[_i1053]);
              }
              xfer += iprot->readListEnd();
            }
@@@ -11643,10 -11890,10 +11890,10 @@@ uint32_t ThriftHiveMetastore_drop_parti
    xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3);
    {
      xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->part_vals.size()));
-     std::vector<std::string> ::const_iterator _iter1048;
-     for (_iter1048 = this->part_vals.begin(); _iter1048 != this->part_vals.end(); ++_iter1048)
 -    std::vector<std::string> ::const_iterator _iter1032;
 -    for (_iter1032 = this->part_vals.begin(); _iter1032 != this->part_vals.end(); ++_iter1032)
++    std::vector<std::string> ::const_iterator _iter1054;
++    for (_iter1054 = this->part_vals.begin(); _iter1054 != this->part_vals.end(); ++_iter1054)
      {
-       xfer += oprot->writeString((*_iter1048));
 -      xfer += oprot->writeString((*_iter1032));
++      xfer += oprot->writeString((*_iter1054));
      }
      xfer += oprot->writeListEnd();
    }
@@@ -11682,10 -11929,10 +11929,10 @@@ uint32_t ThriftHiveMetastore_drop_parti
    xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3);
    {
      xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>((*(this->part_vals)).size()));
-     std::vector<std::string> ::const_iterator _iter1049;
-     for (_iter1049 = (*(this->part_vals)).begin(); _iter1049 != (*(this->part_vals)).end(); ++_iter1049)
 -    std::vector<std::string> ::const_iterator _iter1033;
 -    for (_iter1033 = (*(this->part_vals)).begin(); _iter1033 != (*(this->part_vals)).end(); ++_iter1033)
++    std::vector<std::string> ::const_iterator _iter1055;
++    for (_iter1055 = (*(this->part_vals)).begin(); _iter1055 != (*(this->part_vals)).end(); ++_iter1055)
      {
-       xfer += oprot->writeString((*_iter1049));
 -      xfer += oprot->writeString((*_iter1033));
++      xfer += oprot->writeString((*_iter1055));
      }
      xfer += oprot->writeListEnd();
    }
@@@ -11894,14 -12141,14 +12141,14 @@@ uint32_t ThriftHiveMetastore_drop_parti
          if (ftype == ::apache::thrift::protocol::T_LIST) {
            {
              this->part_vals.clear();
-             uint32_t _size1050;
-             ::apache::thrift::protocol::TType _etype1053;
-             xfer += iprot->readListBegin(_etype1053, _size1050);
-             this->part_vals.resize(_size1050);
-             uint32_t _i1054;
-             for (_i1054 = 0; _i1054 < _size1050; ++_i1054)
 -            uint32_t _size1034;
 -            ::apache::thrift::protocol::TType _etype1037;
 -            xfer += iprot->readListBegin(_etype1037, _size1034);
 -            this->part_vals.resize(_size1034);
 -            uint32_t _i1038;
 -            for (_i1038 = 0; _i1038 < _size1034; ++_i1038)
++            uint32_t _size1056;
++            ::apache::thrift::protocol::TType _etype1059;
++            xfer += iprot->readListBegin(_etype1059, _size1056);
++            this->part_vals.resize(_size1056);
++            uint32_t _i1060;
++            for (_i1060 = 0; _i1060 < _size1056; ++_i1060)
              {
-               xfer += iprot->readString(this->part_vals[_i1054]);
 -              xfer += iprot->readString(this->part_vals[_i1038]);
++              xfer += iprot->readString(this->part_vals[_i1060]);
              }
              xfer += iprot->readListEnd();
            }
@@@ -11954,10 -12201,10 +12201,10 @@@ uint32_t ThriftHiveMetastore_drop_parti
    xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3);
    {
      xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->part_vals.size()));
-     std::vector<std::string> ::const_iterator _iter1055;
-     for (_iter1055 = this->part_vals.begin(); _iter1055 != this->part_vals.end(); ++_iter1055)
 -    std::vector<std::string> ::const_iterator _iter1039;
 -    for (_iter1039 = this->part_vals.begin(); _iter1039 != this->part_vals.end(); ++_iter1039)
++    std::vector<std::string> ::const_iterator _iter1061;
++    for (_iter1061 = this->part_vals.begin(); _iter

<TRUNCATED>
http://git-wip-us.apache.org/repos/asf/hive/blob/1ceaf357/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.h
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/1ceaf357/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore_server.skeleton.cpp
----------------------------------------------------------------------
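For context, the regenerated client code above adds a truncate_table call whose arguments (dbName, tableName, partNames) match the args struct serialized in the hunk, and whose result struct carries only an exception field. Below is a minimal, hypothetical usage sketch against the generated C++ client; it is not part of the commit. The host/port, the Apache::Hadoop::Hive namespace, and the boost::shared_ptr-based constructor are assumptions about the Thrift version in use and may differ in a given build.

  // Hypothetical sketch: calling the new truncate_table RPC via the
  // regenerated ThriftHiveMetastore C++ client (names/ports assumed).
  #include <boost/shared_ptr.hpp>
  #include <string>
  #include <vector>
  #include <thrift/transport/TSocket.h>
  #include <thrift/transport/TBufferTransports.h>
  #include <thrift/protocol/TBinaryProtocol.h>
  #include "ThriftHiveMetastore.h"

  int main() {
    using namespace apache::thrift::transport;
    using namespace apache::thrift::protocol;

    // 9083 is the conventional metastore Thrift port; adjust as needed.
    boost::shared_ptr<TSocket> socket(new TSocket("localhost", 9083));
    boost::shared_ptr<TTransport> transport(new TBufferedTransport(socket));
    boost::shared_ptr<TProtocol> protocol(new TBinaryProtocol(transport));

    Apache::Hadoop::Hive::ThriftHiveMetastoreClient client(protocol);
    transport->open();

    // Truncate two partitions of default.table1; an empty vector would
    // request truncation of the whole table. The three arguments mirror
    // fields 1-3 of ThriftHiveMetastore_truncate_table_args above.
    std::vector<std::string> partNames;
    partNames.push_back("ds=2017-05-01");
    partNames.push_back("ds=2017-05-02");
    client.truncate_table("default", "table1", partNames);

    transport->close();
    return 0;
  }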


[29/50] [abbrv] hive git commit: HIVE-16275: Vectorization: Add ReduceSink support for TopN (in specialized native classes) (Matt McCline, reviewed by Gopal Vijayaraghavan)

Posted by we...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/ec8c390e/ql/src/test/results/clientpositive/llap/vector_interval_2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_interval_2.q.out b/ql/src/test/results/clientpositive/llap/vector_interval_2.q.out
index 7037f97..1509926 100644
--- a/ql/src/test/results/clientpositive/llap/vector_interval_2.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_interval_2.q.out
@@ -145,7 +145,7 @@ STAGE PLANS:
                       Reduce Sink Vectorization:
                           className: VectorReduceSinkObjectHashOperator
                           native: true
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                       Statistics: Num rows: 2 Data size: 788 Basic stats: COMPLETE Column stats: NONE
                       value expressions: _col1 (type: boolean), _col2 (type: boolean), _col3 (type: boolean), _col4 (type: boolean), _col5 (type: boolean), _col6 (type: boolean), _col7 (type: boolean), _col8 (type: boolean), _col9 (type: boolean), _col10 (type: boolean), _col11 (type: boolean), _col12 (type: boolean), _col13 (type: boolean), _col14 (type: boolean), _col15 (type: boolean), _col16 (type: boolean), _col17 (type: boolean), _col18 (type: boolean), _col19 (type: boolean), _col20 (type: boolean), _col21 (type: boolean), _col22 (type: boolean), _col23 (type: boolean), _col24 (type: boolean)
             Execution mode: vectorized, llap
@@ -352,7 +352,7 @@ STAGE PLANS:
                       Reduce Sink Vectorization:
                           className: VectorReduceSinkObjectHashOperator
                           native: true
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                       Statistics: Num rows: 2 Data size: 788 Basic stats: COMPLETE Column stats: NONE
                       value expressions: _col1 (type: boolean), _col2 (type: boolean), _col3 (type: boolean), _col4 (type: boolean), _col5 (type: boolean), _col7 (type: boolean), _col8 (type: boolean), _col9 (type: boolean), _col10 (type: boolean), _col11 (type: boolean), _col13 (type: boolean), _col14 (type: boolean), _col15 (type: boolean), _col16 (type: boolean), _col17 (type: boolean)
             Execution mode: vectorized, llap
@@ -559,7 +559,7 @@ STAGE PLANS:
                       Reduce Sink Vectorization:
                           className: VectorReduceSinkObjectHashOperator
                           native: true
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                       Statistics: Num rows: 2 Data size: 788 Basic stats: COMPLETE Column stats: NONE
                       value expressions: _col1 (type: boolean), _col2 (type: boolean), _col3 (type: boolean), _col4 (type: boolean), _col5 (type: boolean), _col6 (type: boolean), _col7 (type: boolean), _col8 (type: boolean), _col9 (type: boolean), _col10 (type: boolean), _col11 (type: boolean), _col12 (type: boolean), _col13 (type: boolean), _col14 (type: boolean), _col15 (type: boolean), _col16 (type: boolean), _col17 (type: boolean), _col18 (type: boolean), _col19 (type: boolean), _col20 (type: boolean), _col21 (type: boolean), _col22 (type: boolean), _col23 (type: boolean), _col24 (type: boolean)
             Execution mode: vectorized, llap
@@ -766,7 +766,7 @@ STAGE PLANS:
                       Reduce Sink Vectorization:
                           className: VectorReduceSinkObjectHashOperator
                           native: true
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                       Statistics: Num rows: 2 Data size: 788 Basic stats: COMPLETE Column stats: NONE
                       value expressions: _col1 (type: boolean), _col2 (type: boolean), _col3 (type: boolean), _col4 (type: boolean), _col5 (type: boolean), _col7 (type: boolean), _col8 (type: boolean), _col9 (type: boolean), _col10 (type: boolean), _col11 (type: boolean), _col13 (type: boolean), _col14 (type: boolean), _col15 (type: boolean), _col16 (type: boolean), _col17 (type: boolean)
             Execution mode: vectorized, llap
@@ -965,7 +965,7 @@ STAGE PLANS:
                         Reduce Sink Vectorization:
                             className: VectorReduceSinkObjectHashOperator
                             native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 1 Data size: 394 Basic stats: COMPLETE Column stats: NONE
             Execution mode: vectorized, llap
             LLAP IO: all inputs
@@ -1160,7 +1160,7 @@ STAGE PLANS:
                         Reduce Sink Vectorization:
                             className: VectorReduceSinkObjectHashOperator
                             native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 1 Data size: 394 Basic stats: COMPLETE Column stats: NONE
             Execution mode: vectorized, llap
             LLAP IO: all inputs
@@ -1345,7 +1345,7 @@ STAGE PLANS:
                         Reduce Sink Vectorization:
                             className: VectorReduceSinkObjectHashOperator
                             native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 1 Data size: 394 Basic stats: COMPLETE Column stats: NONE
             Execution mode: vectorized, llap
             LLAP IO: all inputs
@@ -1530,7 +1530,7 @@ STAGE PLANS:
                         Reduce Sink Vectorization:
                             className: VectorReduceSinkObjectHashOperator
                             native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 1 Data size: 394 Basic stats: COMPLETE Column stats: NONE
             Execution mode: vectorized, llap
             LLAP IO: all inputs
@@ -1725,7 +1725,7 @@ STAGE PLANS:
                         Reduce Sink Vectorization:
                             className: VectorReduceSinkObjectHashOperator
                             native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 1 Data size: 394 Basic stats: COMPLETE Column stats: NONE
             Execution mode: vectorized, llap
             LLAP IO: all inputs
@@ -1920,7 +1920,7 @@ STAGE PLANS:
                         Reduce Sink Vectorization:
                             className: VectorReduceSinkObjectHashOperator
                             native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 1 Data size: 394 Basic stats: COMPLETE Column stats: NONE
             Execution mode: vectorized, llap
             LLAP IO: all inputs

http://git-wip-us.apache.org/repos/asf/hive/blob/ec8c390e/ql/src/test/results/clientpositive/llap/vector_interval_arithmetic.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_interval_arithmetic.q.out b/ql/src/test/results/clientpositive/llap/vector_interval_arithmetic.q.out
index aadb6e7..1d14092 100644
--- a/ql/src/test/results/clientpositive/llap/vector_interval_arithmetic.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_interval_arithmetic.q.out
@@ -99,7 +99,7 @@ STAGE PLANS:
                       Reduce Sink Vectorization:
                           className: VectorReduceSinkObjectHashOperator
                           native: true
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                       Statistics: Num rows: 50 Data size: 4800 Basic stats: COMPLETE Column stats: NONE
                       value expressions: _col1 (type: date), _col2 (type: date), _col3 (type: date), _col4 (type: date), _col5 (type: date), _col6 (type: date)
             Execution mode: vectorized, llap
@@ -282,7 +282,7 @@ STAGE PLANS:
                       Reduce Sink Vectorization:
                           className: VectorReduceSinkObjectHashOperator
                           native: true
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                       Statistics: Num rows: 50 Data size: 4800 Basic stats: COMPLETE Column stats: NONE
                       value expressions: _col1 (type: interval_day_time), _col2 (type: interval_day_time), _col3 (type: interval_day_time)
             Execution mode: vectorized, llap
@@ -465,7 +465,7 @@ STAGE PLANS:
                       Reduce Sink Vectorization:
                           className: VectorReduceSinkObjectHashOperator
                           native: true
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                       Statistics: Num rows: 50 Data size: 4800 Basic stats: COMPLETE Column stats: NONE
                       value expressions: _col1 (type: timestamp), _col2 (type: timestamp), _col3 (type: timestamp), _col4 (type: timestamp), _col5 (type: timestamp), _col6 (type: timestamp)
             Execution mode: vectorized, llap
@@ -641,10 +641,10 @@ STAGE PLANS:
                       key expressions: CAST( 5-5 AS INTERVAL YEAR TO MONTH) (type: interval_year_month)
                       sort order: +
                       Reduce Sink Vectorization:
-                          className: VectorReduceSinkOperator
-                          native: false
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                          nativeConditionsNotMet: No TopN IS false
+                          className: VectorReduceSinkObjectHashOperator
+                          keyExpressions: VectorUDFAdaptor(CAST( 5-5 AS INTERVAL YEAR TO MONTH)) -> 2:interval_year_month
+                          native: true
+                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                       Statistics: Num rows: 50 Data size: 800 Basic stats: COMPLETE Column stats: COMPLETE
                       TopN Hash Memory Usage: 0.1
             Execution mode: vectorized, llap
@@ -654,8 +654,8 @@ STAGE PLANS:
                 enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
                 groupByVectorOutput: true
                 inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
+                allNative: true
+                usesVectorUDFAdaptor: true
                 vectorized: true
         Reducer 2 
             Execution mode: vectorized, llap
@@ -784,7 +784,7 @@ STAGE PLANS:
                       Reduce Sink Vectorization:
                           className: VectorReduceSinkObjectHashOperator
                           native: true
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                       Statistics: Num rows: 50 Data size: 4800 Basic stats: COMPLETE Column stats: NONE
                       value expressions: _col1 (type: timestamp), _col2 (type: timestamp), _col3 (type: timestamp), _col4 (type: timestamp), _col5 (type: timestamp), _col6 (type: timestamp)
             Execution mode: vectorized, llap
@@ -969,7 +969,7 @@ STAGE PLANS:
                       Reduce Sink Vectorization:
                           className: VectorReduceSinkObjectHashOperator
                           native: true
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                       Statistics: Num rows: 50 Data size: 4800 Basic stats: COMPLETE Column stats: NONE
                       value expressions: _col1 (type: timestamp), _col2 (type: interval_day_time), _col3 (type: interval_day_time), _col4 (type: interval_day_time)
             Execution mode: vectorized, llap
@@ -1154,7 +1154,7 @@ STAGE PLANS:
                       Reduce Sink Vectorization:
                           className: VectorReduceSinkObjectHashOperator
                           native: true
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                       Statistics: Num rows: 50 Data size: 4800 Basic stats: COMPLETE Column stats: NONE
                       value expressions: _col1 (type: timestamp), _col2 (type: timestamp), _col3 (type: timestamp), _col4 (type: timestamp), _col5 (type: timestamp), _col6 (type: timestamp)
             Execution mode: vectorized, llap

http://git-wip-us.apache.org/repos/asf/hive/blob/ec8c390e/ql/src/test/results/clientpositive/llap/vector_interval_mapjoin.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_interval_mapjoin.q.out b/ql/src/test/results/clientpositive/llap/vector_interval_mapjoin.q.out
index 6d828a5..0024fea 100644
--- a/ql/src/test/results/clientpositive/llap/vector_interval_mapjoin.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_interval_mapjoin.q.out
@@ -293,7 +293,7 @@ STAGE PLANS:
                         Reduce Sink Vectorization:
                             className: VectorReduceSinkMultiKeyOperator
                             native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 1000 Data size: 458448 Basic stats: COMPLETE Column stats: NONE
             Execution mode: vectorized, llap
             LLAP IO: all inputs

http://git-wip-us.apache.org/repos/asf/hive/blob/ec8c390e/ql/src/test/results/clientpositive/llap/vector_join30.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_join30.q.out b/ql/src/test/results/clientpositive/llap/vector_join30.q.out
index 6af0959..394393e 100644
--- a/ql/src/test/results/clientpositive/llap/vector_join30.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_join30.q.out
@@ -73,7 +73,7 @@ STAGE PLANS:
                         Reduce Sink Vectorization:
                             className: VectorReduceSinkObjectHashOperator
                             native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: NONE
             Execution mode: vectorized, llap
             LLAP IO: all inputs
@@ -114,7 +114,7 @@ STAGE PLANS:
                         Reduce Sink Vectorization:
                             className: VectorReduceSinkObjectHashOperator
                             native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col0 (type: string)
             Execution mode: vectorized, llap
@@ -175,7 +175,7 @@ STAGE PLANS:
                       Reduce Sink Vectorization:
                           className: VectorReduceSinkObjectHashOperator
                           native: true
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                       Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                       value expressions: _col0 (type: bigint)
         Reducer 3 
@@ -234,7 +234,7 @@ STAGE PLANS:
                   Reduce Sink Vectorization:
                       className: VectorReduceSinkStringOperator
                       native: true
-                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                   Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: NONE
                   value expressions: _col1 (type: string)
 
@@ -319,7 +319,7 @@ STAGE PLANS:
                       Reduce Sink Vectorization:
                           className: VectorReduceSinkObjectHashOperator
                           native: true
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                       Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: NONE
             Execution mode: vectorized, llap
             LLAP IO: all inputs
@@ -353,7 +353,7 @@ STAGE PLANS:
                       Reduce Sink Vectorization:
                           className: VectorReduceSinkObjectHashOperator
                           native: true
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                       Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: NONE
                       value expressions: _col0 (type: string)
             Execution mode: vectorized, llap
@@ -414,7 +414,7 @@ STAGE PLANS:
                       Reduce Sink Vectorization:
                           className: VectorReduceSinkObjectHashOperator
                           native: true
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                       Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                       value expressions: _col0 (type: bigint)
         Reducer 3 
@@ -473,7 +473,7 @@ STAGE PLANS:
                   Reduce Sink Vectorization:
                       className: VectorReduceSinkStringOperator
                       native: true
-                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                   Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: NONE
                   value expressions: _col1 (type: string)
 
@@ -558,7 +558,7 @@ STAGE PLANS:
                       Reduce Sink Vectorization:
                           className: VectorReduceSinkObjectHashOperator
                           native: true
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                       Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: NONE
             Execution mode: vectorized, llap
             LLAP IO: all inputs
@@ -592,7 +592,7 @@ STAGE PLANS:
                       Reduce Sink Vectorization:
                           className: VectorReduceSinkObjectHashOperator
                           native: true
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                       Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: NONE
                       value expressions: _col0 (type: string)
             Execution mode: vectorized, llap
@@ -630,7 +630,7 @@ STAGE PLANS:
                   Reduce Sink Vectorization:
                       className: VectorReduceSinkStringOperator
                       native: true
-                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                   Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: NONE
         Reducer 4 
             Execution mode: vectorized, llap
@@ -680,7 +680,7 @@ STAGE PLANS:
                       Reduce Sink Vectorization:
                           className: VectorReduceSinkObjectHashOperator
                           native: true
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                       Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                       value expressions: _col0 (type: bigint)
         Reducer 5 
@@ -810,7 +810,7 @@ STAGE PLANS:
                         Reduce Sink Vectorization:
                             className: VectorReduceSinkObjectHashOperator
                             native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: NONE
             Execution mode: vectorized, llap
             LLAP IO: all inputs
@@ -851,7 +851,7 @@ STAGE PLANS:
                         Reduce Sink Vectorization:
                             className: VectorReduceSinkObjectHashOperator
                             native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col0 (type: string)
             Execution mode: vectorized, llap
@@ -893,7 +893,7 @@ STAGE PLANS:
                         Reduce Sink Vectorization:
                             className: VectorReduceSinkObjectHashOperator
                             native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col0 (type: string)
             Execution mode: vectorized, llap
@@ -958,7 +958,7 @@ STAGE PLANS:
                       Reduce Sink Vectorization:
                           className: VectorReduceSinkObjectHashOperator
                           native: true
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                       Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                       value expressions: _col0 (type: bigint)
         Reducer 3 
@@ -1017,7 +1017,7 @@ STAGE PLANS:
                   Reduce Sink Vectorization:
                       className: VectorReduceSinkStringOperator
                       native: true
-                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                   Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: NONE
                   value expressions: _col1 (type: string)
         Reducer 7 
@@ -1045,7 +1045,7 @@ STAGE PLANS:
                   Reduce Sink Vectorization:
                       className: VectorReduceSinkStringOperator
                       native: true
-                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                   Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: NONE
 
   Stage: Stage-0
@@ -1143,7 +1143,7 @@ STAGE PLANS:
                       Reduce Sink Vectorization:
                           className: VectorReduceSinkObjectHashOperator
                           native: true
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                       Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: NONE
             Execution mode: vectorized, llap
             LLAP IO: all inputs
@@ -1177,7 +1177,7 @@ STAGE PLANS:
                       Reduce Sink Vectorization:
                           className: VectorReduceSinkObjectHashOperator
                           native: true
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                       Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: NONE
                       value expressions: _col0 (type: string)
             Execution mode: vectorized, llap
@@ -1212,7 +1212,7 @@ STAGE PLANS:
                       Reduce Sink Vectorization:
                           className: VectorReduceSinkObjectHashOperator
                           native: true
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                       Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: NONE
                       value expressions: _col0 (type: string)
             Execution mode: vectorized, llap
@@ -1250,7 +1250,7 @@ STAGE PLANS:
                   Reduce Sink Vectorization:
                       className: VectorReduceSinkStringOperator
                       native: true
-                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                   Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: NONE
         Reducer 3 
             Execution mode: llap
@@ -1334,7 +1334,7 @@ STAGE PLANS:
                   Reduce Sink Vectorization:
                       className: VectorReduceSinkStringOperator
                       native: true
-                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                   Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: NONE
                   value expressions: _col1 (type: string)
         Reducer 8 
@@ -1362,7 +1362,7 @@ STAGE PLANS:
                   Reduce Sink Vectorization:
                       className: VectorReduceSinkStringOperator
                       native: true
-                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                   Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: NONE
 
   Stage: Stage-0
@@ -1460,7 +1460,7 @@ STAGE PLANS:
                       Reduce Sink Vectorization:
                           className: VectorReduceSinkObjectHashOperator
                           native: true
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                       Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: NONE
             Execution mode: vectorized, llap
             LLAP IO: all inputs
@@ -1494,7 +1494,7 @@ STAGE PLANS:
                       Reduce Sink Vectorization:
                           className: VectorReduceSinkObjectHashOperator
                           native: true
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                       Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: NONE
                       value expressions: _col0 (type: string)
             Execution mode: vectorized, llap
@@ -1529,7 +1529,7 @@ STAGE PLANS:
                       Reduce Sink Vectorization:
                           className: VectorReduceSinkObjectHashOperator
                           native: true
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                       Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: NONE
                       value expressions: _col0 (type: string)
             Execution mode: vectorized, llap
@@ -1567,7 +1567,7 @@ STAGE PLANS:
                   Reduce Sink Vectorization:
                       className: VectorReduceSinkStringOperator
                       native: true
-                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                   Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: NONE
         Reducer 3 
             Execution mode: llap
@@ -1651,7 +1651,7 @@ STAGE PLANS:
                   Reduce Sink Vectorization:
                       className: VectorReduceSinkStringOperator
                       native: true
-                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                   Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: NONE
                   value expressions: _col1 (type: string)
         Reducer 8 
@@ -1679,7 +1679,7 @@ STAGE PLANS:
                   Reduce Sink Vectorization:
                       className: VectorReduceSinkStringOperator
                       native: true
-                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                   Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: NONE
 
   Stage: Stage-0
@@ -1777,7 +1777,7 @@ STAGE PLANS:
                       Reduce Sink Vectorization:
                           className: VectorReduceSinkObjectHashOperator
                           native: true
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                       Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: NONE
             Execution mode: vectorized, llap
             LLAP IO: all inputs
@@ -1811,7 +1811,7 @@ STAGE PLANS:
                       Reduce Sink Vectorization:
                           className: VectorReduceSinkObjectHashOperator
                           native: true
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                       Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: NONE
                       value expressions: _col0 (type: string)
             Execution mode: vectorized, llap
@@ -1846,7 +1846,7 @@ STAGE PLANS:
                       Reduce Sink Vectorization:
                           className: VectorReduceSinkObjectHashOperator
                           native: true
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                       Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: NONE
                       value expressions: _col0 (type: string)
             Execution mode: vectorized, llap
@@ -1884,7 +1884,7 @@ STAGE PLANS:
                   Reduce Sink Vectorization:
                       className: VectorReduceSinkStringOperator
                       native: true
-                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                   Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: NONE
         Reducer 3 
             Execution mode: llap
@@ -1968,7 +1968,7 @@ STAGE PLANS:
                   Reduce Sink Vectorization:
                       className: VectorReduceSinkStringOperator
                       native: true
-                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                   Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: NONE
                   value expressions: _col1 (type: string)
         Reducer 8 
@@ -1996,7 +1996,7 @@ STAGE PLANS:
                   Reduce Sink Vectorization:
                       className: VectorReduceSinkStringOperator
                       native: true
-                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                   Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: NONE
 
   Stage: Stage-0
@@ -2094,7 +2094,7 @@ STAGE PLANS:
                       Reduce Sink Vectorization:
                           className: VectorReduceSinkObjectHashOperator
                           native: true
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                       Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: NONE
             Execution mode: vectorized, llap
             LLAP IO: all inputs
@@ -2128,7 +2128,7 @@ STAGE PLANS:
                       Reduce Sink Vectorization:
                           className: VectorReduceSinkObjectHashOperator
                           native: true
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                       Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: NONE
                       value expressions: _col0 (type: string)
             Execution mode: vectorized, llap
@@ -2163,7 +2163,7 @@ STAGE PLANS:
                       Reduce Sink Vectorization:
                           className: VectorReduceSinkObjectHashOperator
                           native: true
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                       Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: NONE
                       value expressions: _col0 (type: string)
             Execution mode: vectorized, llap
@@ -2201,7 +2201,7 @@ STAGE PLANS:
                   Reduce Sink Vectorization:
                       className: VectorReduceSinkStringOperator
                       native: true
-                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                   Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: NONE
         Reducer 3 
             Execution mode: llap
@@ -2285,7 +2285,7 @@ STAGE PLANS:
                   Reduce Sink Vectorization:
                       className: VectorReduceSinkStringOperator
                       native: true
-                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                   Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: NONE
                   value expressions: _col1 (type: string)
         Reducer 8 
@@ -2313,7 +2313,7 @@ STAGE PLANS:
                   Reduce Sink Vectorization:
                       className: VectorReduceSinkStringOperator
                       native: true
-                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                   Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: NONE
 
   Stage: Stage-0

http://git-wip-us.apache.org/repos/asf/hive/blob/ec8c390e/ql/src/test/results/clientpositive/llap/vector_join_part_col_char.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_join_part_col_char.q.out b/ql/src/test/results/clientpositive/llap/vector_join_part_col_char.q.out
index 95dcba9..8d40a6d 100644
--- a/ql/src/test/results/clientpositive/llap/vector_join_part_col_char.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_join_part_col_char.q.out
@@ -111,24 +111,24 @@ Stage-0
       Reducer 2 llap
       File Output Operator [FS_10]
         Merge Join Operator [MERGEJOIN_21] (rows=2 width=431)
-          Conds:RS_23._col2=RS_28._col2(Inner),Output:["_col0","_col1","_col2","_col3","_col4","_col5"]
+          Conds:RS_6._col2=RS_7._col2(Inner),Output:["_col0","_col1","_col2","_col3","_col4","_col5"]
         <-Map 1 [SIMPLE_EDGE] vectorized, llap
-          SHUFFLE [RS_23]
+          SHUFFLE [RS_6]
             PartitionCols:_col2
-            Select Operator [SEL_22] (rows=2 width=134)
+            Select Operator [SEL_2] (rows=2 width=134)
               Output:["_col0","_col1","_col2"]
               TableScan [TS_0] (rows=2 width=236)
                 default@char_tbl1,c1,Tbl:COMPLETE,Col:PARTIAL,Output:["name","age"]
-          Dynamic Partitioning Event Operator [EVENT_26] (rows=1 width=134)
-            Group By Operator [GBY_25] (rows=1 width=134)
+          Dynamic Partitioning Event Operator [EVENT_20] (rows=1 width=134)
+            Group By Operator [GBY_19] (rows=1 width=134)
               Output:["_col0"],keys:_col0
-              Select Operator [SEL_24] (rows=2 width=134)
+              Select Operator [SEL_18] (rows=2 width=134)
                 Output:["_col0"]
-                 Please refer to the previous Select Operator [SEL_22]
+                 Please refer to the previous Select Operator [SEL_2]
         <-Map 3 [SIMPLE_EDGE] vectorized, llap
-          SHUFFLE [RS_28]
+          SHUFFLE [RS_7]
             PartitionCols:_col2
-            Select Operator [SEL_27] (rows=2 width=89)
+            Select Operator [SEL_5] (rows=2 width=89)
               Output:["_col0","_col1","_col2"]
               TableScan [TS_3] (rows=2 width=190)
                 default@char_tbl2,c2,Tbl:COMPLETE,Col:PARTIAL,Output:["name","age"]

http://git-wip-us.apache.org/repos/asf/hive/blob/ec8c390e/ql/src/test/results/clientpositive/llap/vector_left_outer_join2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_left_outer_join2.q.out b/ql/src/test/results/clientpositive/llap/vector_left_outer_join2.q.out
index c9b794e..20f6acc 100644
--- a/ql/src/test/results/clientpositive/llap/vector_left_outer_join2.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_left_outer_join2.q.out
@@ -371,7 +371,7 @@ STAGE PLANS:
                       Reduce Sink Vectorization:
                           className: VectorReduceSinkLongOperator
                           native: true
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                       Statistics: Num rows: 4 Data size: 372 Basic stats: COMPLETE Column stats: NONE
                       value expressions: _col1 (type: char(2))
             Execution mode: vectorized, llap
@@ -511,7 +511,7 @@ STAGE PLANS:
                       Reduce Sink Vectorization:
                           className: VectorReduceSinkLongOperator
                           native: true
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                       Statistics: Num rows: 4 Data size: 372 Basic stats: COMPLETE Column stats: NONE
                       value expressions: _col1 (type: char(2))
             Execution mode: vectorized, llap
@@ -650,7 +650,7 @@ STAGE PLANS:
                       Reduce Sink Vectorization:
                           className: VectorReduceSinkLongOperator
                           native: true
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                       Statistics: Num rows: 4 Data size: 372 Basic stats: COMPLETE Column stats: NONE
                       value expressions: _col1 (type: char(2))
             Execution mode: vectorized, llap
@@ -789,7 +789,7 @@ STAGE PLANS:
                       Reduce Sink Vectorization:
                           className: VectorReduceSinkLongOperator
                           native: true
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                       Statistics: Num rows: 4 Data size: 372 Basic stats: COMPLETE Column stats: NONE
                       value expressions: _col1 (type: char(2))
             Execution mode: vectorized, llap


http://git-wip-us.apache.org/repos/asf/hive/blob/ec8c390e/ql/src/test/results/clientpositive/llap/vector_leftsemi_mapjoin.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_leftsemi_mapjoin.q.out b/ql/src/test/results/clientpositive/llap/vector_leftsemi_mapjoin.q.out
index 6c6d0f3..445e585 100644
--- a/ql/src/test/results/clientpositive/llap/vector_leftsemi_mapjoin.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_leftsemi_mapjoin.q.out
@@ -3366,7 +3366,7 @@ STAGE PLANS:
                         Reduce Sink Vectorization:
                             className: VectorReduceSinkObjectHashOperator
                             native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
             Execution mode: vectorized, llap
             LLAP IO: all inputs
             Map Vectorization:
@@ -3394,7 +3394,7 @@ STAGE PLANS:
                           Reduce Sink Vectorization:
                               className: VectorReduceSinkLongOperator
                               native: true
-                              nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                              nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
             Execution mode: vectorized, llap
             LLAP IO: all inputs
             Map Vectorization:
@@ -3477,7 +3477,7 @@ STAGE PLANS:
                         Reduce Sink Vectorization:
                             className: VectorReduceSinkObjectHashOperator
                             native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
             Execution mode: vectorized, llap
             LLAP IO: all inputs
             Map Vectorization:
@@ -3505,7 +3505,7 @@ STAGE PLANS:
                           Reduce Sink Vectorization:
                               className: VectorReduceSinkLongOperator
                               native: true
-                              nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                              nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
             Execution mode: vectorized, llap
             LLAP IO: all inputs
             Map Vectorization:
@@ -3590,7 +3590,7 @@ STAGE PLANS:
                         Reduce Sink Vectorization:
                             className: VectorReduceSinkObjectHashOperator
                             native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
             Execution mode: vectorized, llap
             LLAP IO: all inputs
             Map Vectorization:
@@ -3618,7 +3618,7 @@ STAGE PLANS:
                           Reduce Sink Vectorization:
                               className: VectorReduceSinkLongOperator
                               native: true
-                              nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                              nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
             Execution mode: vectorized, llap
             LLAP IO: all inputs
             Map Vectorization:
@@ -3698,7 +3698,7 @@ STAGE PLANS:
                           Reduce Sink Vectorization:
                               className: VectorReduceSinkObjectHashOperator
                               native: true
-                              nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                              nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
             Execution mode: vectorized, llap
             LLAP IO: all inputs
             Map Vectorization:
@@ -3726,7 +3726,7 @@ STAGE PLANS:
                           Reduce Sink Vectorization:
                               className: VectorReduceSinkLongOperator
                               native: true
-                              nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                              nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
             Execution mode: vectorized, llap
             LLAP IO: all inputs
             Map Vectorization:
@@ -3814,7 +3814,7 @@ STAGE PLANS:
                         Reduce Sink Vectorization:
                             className: VectorReduceSinkObjectHashOperator
                             native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
             Execution mode: vectorized, llap
             LLAP IO: all inputs
             Map Vectorization:
@@ -3842,7 +3842,7 @@ STAGE PLANS:
                           Reduce Sink Vectorization:
                               className: VectorReduceSinkLongOperator
                               native: true
-                              nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                              nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
             Execution mode: vectorized, llap
             LLAP IO: all inputs
             Map Vectorization:
@@ -3924,7 +3924,7 @@ STAGE PLANS:
                           Reduce Sink Vectorization:
                               className: VectorReduceSinkLongOperator
                               native: true
-                              nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                              nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
             Execution mode: vectorized, llap
             LLAP IO: all inputs
             Map Vectorization:
@@ -3953,7 +3953,7 @@ STAGE PLANS:
                           Reduce Sink Vectorization:
                               className: VectorReduceSinkObjectHashOperator
                               native: true
-                              nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                              nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
             Execution mode: vectorized, llap
             LLAP IO: all inputs
             Map Vectorization:
@@ -4035,7 +4035,7 @@ STAGE PLANS:
                           Reduce Sink Vectorization:
                               className: VectorReduceSinkLongOperator
                               native: true
-                              nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                              nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
             Execution mode: vectorized, llap
             LLAP IO: all inputs
             Map Vectorization:
@@ -4064,7 +4064,7 @@ STAGE PLANS:
                           Reduce Sink Vectorization:
                               className: VectorReduceSinkObjectHashOperator
                               native: true
-                              nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                              nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
             Execution mode: vectorized, llap
             LLAP IO: all inputs
             Map Vectorization:
@@ -4143,7 +4143,7 @@ STAGE PLANS:
                           Reduce Sink Vectorization:
                               className: VectorReduceSinkLongOperator
                               native: true
-                              nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                              nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
             Execution mode: vectorized, llap
             LLAP IO: all inputs
             Map Vectorization:
@@ -4169,7 +4169,7 @@ STAGE PLANS:
                         Reduce Sink Vectorization:
                             className: VectorReduceSinkObjectHashOperator
                             native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
             Execution mode: vectorized, llap
             LLAP IO: all inputs
             Map Vectorization:
@@ -4251,7 +4251,7 @@ STAGE PLANS:
                         Reduce Sink Vectorization:
                             className: VectorReduceSinkObjectHashOperator
                             native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
             Execution mode: vectorized, llap
             LLAP IO: all inputs
             Map Vectorization:
@@ -4279,7 +4279,7 @@ STAGE PLANS:
                           Reduce Sink Vectorization:
                               className: VectorReduceSinkLongOperator
                               native: true
-                              nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                              nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
             Execution mode: vectorized, llap
             LLAP IO: all inputs
             Map Vectorization:
@@ -4375,7 +4375,7 @@ STAGE PLANS:
                         Reduce Sink Vectorization:
                             className: VectorReduceSinkObjectHashOperator
                             native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
             Execution mode: vectorized, llap
             LLAP IO: all inputs
             Map Vectorization:
@@ -4403,7 +4403,7 @@ STAGE PLANS:
                           Reduce Sink Vectorization:
                               className: VectorReduceSinkLongOperator
                               native: true
-                              nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                              nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
             Execution mode: vectorized, llap
             LLAP IO: all inputs
             Map Vectorization:
@@ -4487,7 +4487,7 @@ STAGE PLANS:
                           Reduce Sink Vectorization:
                               className: VectorReduceSinkObjectHashOperator
                               native: true
-                              nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                              nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
             Execution mode: vectorized, llap
             LLAP IO: all inputs
             Map Vectorization:
@@ -4508,7 +4508,7 @@ STAGE PLANS:
                       Reduce Sink Vectorization:
                           className: VectorReduceSinkLongOperator
                           native: true
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
             Execution mode: vectorized, llap
             LLAP IO: all inputs
             Map Vectorization:
@@ -4536,7 +4536,7 @@ STAGE PLANS:
                           Reduce Sink Vectorization:
                               className: VectorReduceSinkLongOperator
                               native: true
-                              nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                              nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
             Execution mode: vectorized, llap
             LLAP IO: all inputs
             Map Vectorization:
@@ -4629,7 +4629,7 @@ STAGE PLANS:
                         Reduce Sink Vectorization:
                             className: VectorReduceSinkObjectHashOperator
                             native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
             Execution mode: vectorized, llap
             LLAP IO: all inputs
             Map Vectorization:
@@ -4657,7 +4657,7 @@ STAGE PLANS:
                           Reduce Sink Vectorization:
                               className: VectorReduceSinkMultiKeyOperator
                               native: true
-                              nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                              nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
             Execution mode: vectorized, llap
             LLAP IO: all inputs
             Map Vectorization:
@@ -4748,7 +4748,7 @@ STAGE PLANS:
                         Reduce Sink Vectorization:
                             className: VectorReduceSinkObjectHashOperator
                             native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
             Execution mode: vectorized, llap
             LLAP IO: all inputs
             Map Vectorization:
@@ -4776,7 +4776,7 @@ STAGE PLANS:
                           Reduce Sink Vectorization:
                               className: VectorReduceSinkLongOperator
                               native: true
-                              nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                              nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
             Execution mode: vectorized, llap
             LLAP IO: all inputs
             Map Vectorization:
@@ -4804,7 +4804,7 @@ STAGE PLANS:
                           Reduce Sink Vectorization:
                               className: VectorReduceSinkLongOperator
                               native: true
-                              nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                              nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
             Execution mode: vectorized, llap
             LLAP IO: all inputs
             Map Vectorization:
@@ -4894,7 +4894,7 @@ STAGE PLANS:
                       Reduce Sink Vectorization:
                           className: VectorReduceSinkObjectHashOperator
                           native: true
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
             Execution mode: vectorized, llap
             LLAP IO: all inputs
             Map Vectorization:
@@ -4912,7 +4912,7 @@ STAGE PLANS:
                     Reduce Sink Vectorization:
                         className: VectorReduceSinkLongOperator
                         native: true
-                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
             Execution mode: vectorized, llap
             LLAP IO: all inputs
             Map Vectorization:
@@ -4937,7 +4937,7 @@ STAGE PLANS:
                         Reduce Sink Vectorization:
                             className: VectorReduceSinkLongOperator
                             native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
             Execution mode: vectorized, llap
             LLAP IO: all inputs
             Map Vectorization:
@@ -5034,7 +5034,7 @@ STAGE PLANS:
                     Reduce Sink Vectorization:
                         className: VectorReduceSinkLongOperator
                         native: true
-                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
             Execution mode: vectorized, llap
             LLAP IO: all inputs
             Map Vectorization:
@@ -5052,7 +5052,7 @@ STAGE PLANS:
                     Reduce Sink Vectorization:
                         className: VectorReduceSinkLongOperator
                         native: true
-                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
             Execution mode: vectorized, llap
             LLAP IO: all inputs
             Map Vectorization:
@@ -5077,7 +5077,7 @@ STAGE PLANS:
                         Reduce Sink Vectorization:
                             className: VectorReduceSinkLongOperator
                             native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
             Execution mode: vectorized, llap
             LLAP IO: all inputs
             Map Vectorization:
@@ -5178,7 +5178,7 @@ STAGE PLANS:
                     Reduce Sink Vectorization:
                         className: VectorReduceSinkLongOperator
                         native: true
-                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
             Execution mode: vectorized, llap
             LLAP IO: all inputs
             Map Vectorization:
@@ -5203,7 +5203,7 @@ STAGE PLANS:
                         Reduce Sink Vectorization:
                             className: VectorReduceSinkLongOperator
                             native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
             Execution mode: vectorized, llap
             LLAP IO: all inputs
             Map Vectorization:
@@ -5221,7 +5221,7 @@ STAGE PLANS:
                     Reduce Sink Vectorization:
                         className: VectorReduceSinkLongOperator
                         native: true
-                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
             Execution mode: vectorized, llap
             LLAP IO: all inputs
             Map Vectorization:
@@ -5322,7 +5322,7 @@ STAGE PLANS:
                     Reduce Sink Vectorization:
                         className: VectorReduceSinkLongOperator
                         native: true
-                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
             Execution mode: vectorized, llap
             LLAP IO: all inputs
             Map Vectorization:
@@ -5347,7 +5347,7 @@ STAGE PLANS:
                         Reduce Sink Vectorization:
                             className: VectorReduceSinkLongOperator
                             native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
             Execution mode: vectorized, llap
             LLAP IO: all inputs
             Map Vectorization:
@@ -5365,7 +5365,7 @@ STAGE PLANS:
                     Reduce Sink Vectorization:
                         className: VectorReduceSinkLongOperator
                         native: true
-                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
             Execution mode: vectorized, llap
             LLAP IO: all inputs
             Map Vectorization:
@@ -5468,7 +5468,7 @@ STAGE PLANS:
                     Reduce Sink Vectorization:
                         className: VectorReduceSinkLongOperator
                         native: true
-                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
             Execution mode: vectorized, llap
             LLAP IO: all inputs
             Map Vectorization:
@@ -5493,7 +5493,7 @@ STAGE PLANS:
                         Reduce Sink Vectorization:
                             className: VectorReduceSinkLongOperator
                             native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
             Execution mode: vectorized, llap
             LLAP IO: all inputs
             Map Vectorization:
@@ -5511,7 +5511,7 @@ STAGE PLANS:
                     Reduce Sink Vectorization:
                         className: VectorReduceSinkLongOperator
                         native: true
-                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
             Execution mode: vectorized, llap
             LLAP IO: all inputs
             Map Vectorization:
@@ -5638,7 +5638,7 @@ STAGE PLANS:
                           Reduce Sink Vectorization:
                               className: VectorReduceSinkObjectHashOperator
                               native: true
-                              nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                              nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
             Execution mode: vectorized, llap
             LLAP IO: all inputs
             Map Vectorization:
@@ -5666,7 +5666,7 @@ STAGE PLANS:
                           Reduce Sink Vectorization:
                               className: VectorReduceSinkLongOperator
                               native: true
-                              nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                              nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
             Execution mode: vectorized, llap
             LLAP IO: all inputs
             Map Vectorization:
@@ -5684,7 +5684,7 @@ STAGE PLANS:
                     Reduce Sink Vectorization:
                         className: VectorReduceSinkStringOperator
                         native: true
-                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
             Execution mode: vectorized, llap
             LLAP IO: all inputs
             Map Vectorization:
@@ -5827,7 +5827,7 @@ STAGE PLANS:
                           Reduce Sink Vectorization:
                               className: VectorReduceSinkStringOperator
                               native: true
-                              nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                              nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
             Execution mode: vectorized, llap
             LLAP IO: all inputs
             Map Vectorization:
@@ -5911,7 +5911,7 @@ STAGE PLANS:
                         Reduce Sink Vectorization:
                             className: VectorReduceSinkObjectHashOperator
                             native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 12 Data size: 1125 Basic stats: COMPLETE Column stats: NONE
             Execution mode: vectorized, llap
             LLAP IO: all inputs
@@ -5969,7 +5969,7 @@ STAGE PLANS:
                           Reduce Sink Vectorization:
                               className: VectorReduceSinkLongOperator
                               native: true
-                              nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                              nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                           Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
             Execution mode: vectorized, llap
             LLAP IO: all inputs
@@ -6102,7 +6102,7 @@ STAGE PLANS:
                         Reduce Sink Vectorization:
                             className: VectorReduceSinkObjectHashOperator
                             native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 12 Data size: 1125 Basic stats: COMPLETE Column stats: NONE
             Execution mode: vectorized, llap
             LLAP IO: all inputs
@@ -6160,7 +6160,7 @@ STAGE PLANS:
                           Reduce Sink Vectorization:
                               className: VectorReduceSinkLongOperator
                               native: true
-                              nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                              nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                           Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
             Execution mode: vectorized, llap
             LLAP IO: all inputs
@@ -6295,7 +6295,7 @@ STAGE PLANS:
                         Reduce Sink Vectorization:
                             className: VectorReduceSinkObjectHashOperator
                             native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 12 Data size: 1125 Basic stats: COMPLETE Column stats: NONE
             Execution mode: vectorized, llap
             LLAP IO: all inputs
@@ -6353,7 +6353,7 @@ STAGE PLANS:
                           Reduce Sink Vectorization:
                               className: VectorReduceSinkLongOperator
                               native: true
-                              nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                              nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                           Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
             Execution mode: vectorized, llap
             LLAP IO: all inputs
@@ -6488,7 +6488,7 @@ STAGE PLANS:
                           Reduce Sink Vectorization:
                               className: VectorReduceSinkObjectHashOperator
                               native: true
-                              nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                              nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                           Statistics: Num rows: 12 Data size: 1125 Basic stats: COMPLETE Column stats: NONE
             Execution mode: vectorized, llap
             LLAP IO: all inputs
@@ -6546,7 +6546,7 @@ STAGE PLANS:
                           Reduce Sink Vectorization:
                               className: VectorReduceSinkLongOperator
                               native: true
-                              nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                              nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                           Statistics: Num rows: 7 Data size: 651 Basic stats: COMPLETE Column stats: NONE
             Execution mode: vectorized, llap
             LLAP IO: all inputs
@@ -6684,7 +6684,7 @@ STAGE PLANS:
                         Reduce Sink Vectorization:
                             className: VectorReduceSinkObjectHashOperator
                             native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 12 Data size: 1125 Basic stats: COMPLETE Column stats: NONE
             Execution mode: vectorized, llap
             LLAP IO: all inputs
@@ -6742,7 +6742,7 @@ STAGE PLANS:
                           Reduce Sink Vectorization:
                               className: VectorReduceSinkLongOperator
                               native: true
-                              nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                              nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                           Statistics: Num rows: 3 Data size: 279 Basic stats: COMPLETE Column stats: NONE
             Execution mode: vectorized, llap
             LLAP IO: all inputs
@@ -6877,7 +6877,7 @@ STAGE PLANS:
                           Reduce Sink Vectorization:
                               className: VectorReduceSinkLongOperator
                               native: true
-                              nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                              nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                           Statistics: Num rows: 7 Data size: 651 Basic stats: COMPLETE Column stats: NONE
             Execution mode: vectorized, llap
             LLAP IO: all inputs
@@ -6938,7 +6938,7 @@ STAGE PLANS:
                           Reduce Sink Vectorization:
                               className: VectorReduceSinkObjectHashOperator
                               native: true
-                              nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                              nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                           Statistics: Num rows: 12 Data size: 1125 Basic stats: COMPLETE Column stats: NONE
             Execution mode: vectorized, llap
             LLAP IO: all inputs
@@ -7073,7 +7073,7 @@ STAGE PLANS:
                           Reduce Sink Vectorization:
                               className: VectorReduceSinkLongOperator
                               native: true
-                              nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                              nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                           Statistics: Num rows: 1 Data size: 93 Basic stats: COMPLETE Column stats: NONE
             Execution mode: vectorized, llap
             LLAP IO: all inputs
@@ -7134,7 +7134,7 @@ STAGE PLANS:
                           Reduce Sink Vectorization:
                               className: VectorReduceSinkObjectHashOperator
                               native: true
-                              nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                              nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                           Statistics: Num rows: 12 Data size: 1125 Basic stats: COMPLETE Column stats: NONE
             Execution mode: vectorized, llap
             LLAP IO: all inputs
@@ -7266,7 +7266,7 @@ STAGE PLANS:
                           Reduce Sink Vectorization:
                               className: VectorReduceSinkLongOperator
                               native: true
-                              nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                              nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                           Statistics: Num rows: 3 Data size: 279 Basic stats: COMPLETE Column stats: NONE
             Execution mode: vectorized, llap
             LLAP IO: all inputs
@@ -7319,7 +7319,7 @@ STAGE PLANS:
                         Reduce Sink Vectorization:
                             className: VectorReduceSinkObjectHashOperator
                             native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 12 Data size: 1125 Basic stats: COMPLETE Column stats: NONE
             Execution mode: vectorized, llap
             LLAP IO: all inputs
@@ -7451,7 +7451,7 @@ STAGE PLANS:
                         Reduce Sink Vectorization:
                             className: VectorReduceSinkObjectHashOperator
                             native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 24 Data size: 2250 Basic stats: COMPLETE Column stats: NONE
             Execution mode: vectorized, llap
             LLAP IO: all inputs
@@ -7509,7 +7509,7 @@ STAGE PLANS:
                           Reduce Sink Vectorization:
                               className: VectorReduceSinkLongOperator
                               native: true
-                              nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                              nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                           Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
             Execution mode: vectorized, llap
             LLAP IO: all inputs
@@ -7655,7 +7655,7 @@ STAGE PLANS:
                         Reduce Sink Vectorization:
                             className: VectorReduceSinkObjectHashOperator
                             native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 12 Data size: 1125 Basic stats: COMPLETE Column stats: NONE
             Execution mode: vectorized, llap
             LLAP IO: all inputs
@@ -7714,7 +7714,7 @@ STAGE PLANS:
                               className: VectorReduceSinkLongOperator
                               keyExpressions: LongScalarMultiplyLongColumn(val 2, col 0) -> 1:long
                               native: true
-                              nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                              nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                           Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
             Execution mode: vectorized, llap
             LLAP IO: all inputs
@@ -7857,7 +7857,7 @@ STAGE PLANS:
                           Reduce Sink Vectorization:
                               className: VectorReduceSinkObjectHashOperator
                               native: true
-                              nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                              nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                           Statistics: Num rows: 48 Data size: 4501 Basic stats: COMPLETE Column stats: NONE
                           value expressions: _col2 (type: int), _col3 (type: string)
             Execution mode: vectorized, llap
@@ -7898,7 +7898,7 @@ STAGE PLANS:
                       Reduce Sink Vectorization:
                           className: VectorReduceSinkLongOperator
                           native: true
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                       Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
                       value expressions: value (type: string)
             Execution mode: vectorized, llap
@@ -7957,7 +7957,7 @@ STAGE PLANS:
                           Reduce Sink Vectorization:
                               className: VectorReduceSinkLongOperator
                               native: true
-                              nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                              nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                           Statistics: Num rows: 22 Data size: 2046 Basic stats: COMPLETE Column stats: NONE
             Execution mode: vectorized, llap
             LLAP IO: all inputs
@@ -8100,7 +8100,7 @@ STAGE PLANS:
                         Reduce Sink Vectorization:
                             className: VectorReduceSinkObjectHashOperator
                             native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 24 Data size: 2250 Basic stats: COMPLETE Column stats: NONE
             Execution mode: vectorized, llap
             LLAP IO: all inputs
@@ -8158,7 +8158,7 @@ STAGE PLANS:
                           Reduce Sink Vectorization:
                               className: VectorReduceSinkMultiKeyOperator
                               native: true
-                              nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                              nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                           Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
             Execution mode: vectorized, llap
             LLAP IO: all inputs
@@ -8302,7 +8302,7 @@ STAGE PLANS:
                         Reduce Sink Vectorization:
                             className: VectorReduceSinkObjectHashOperator
                             native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 48 Data size: 4501 Basic stats: COMPLETE Column stats: NONE
             Execution mode: vectorized, llap
             LLAP IO: all inputs
@@ -8360,7 +8360,7 @@ STAGE PLANS:
                           Reduce Sink Vectorization:
                               className: VectorReduceSinkLongOperator
                               native: true
-                              nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                              nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                           Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
             Execution mode: vectorized, llap
             LLAP IO: all inputs
@@ -8418,7 +8418,7 @@ STAGE PLANS:
                           Reduce Sink Vectorization:
                               className: VectorReduceSinkLongOperator
                               native: true
-                              nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                              nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                           Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
             Execution mode: vectorized, llap
             LLAP IO: all inputs
@@ -8557,7 +8557,7 @@ STAGE PLANS:
                       Reduce Sink Vectorization:
                           className: VectorReduceSinkObjectHashOperator
                           native: true
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                       Statistics: Num rows: 48 Data size: 4501 Basic stats: COMPLETE Column stats: NONE
             Execution mode: vectorized, llap
             LLAP IO: all inputs
@@ -8589,7 +8589,7 @@ STAGE PLANS:
                     Reduce Sink Vectorization:
                         className: VectorReduceSinkLongOperator
                         native: true
-                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                     Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
             Execution mode: vectorized, llap
             LLAP IO: all inputs
@@ -8640,7 +8640,7 @@ STAGE PLANS:
                         Reduce Sink Vectorization:
                             className: VectorReduceSinkLongOperator
                             native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
             Execution mode: vectorized, llap
             LLAP IO: all inputs
@@ -8774,7 +8774,7 @@ STAGE PLANS:
                     Reduce Sink Vectorization:
                         className: VectorReduceSinkLongOperator
                         native: true
-                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                     Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
             Execution mode: vectorized, llap
             LLAP IO: all inputs
@@ -8806,7 +8806,7 @@ STAGE PLANS:
                     Reduce Sink Vectorization:
                         className: VectorReduceSinkLongOperator
                         native: true
-                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                     Statistics: Num rows: 22 Data size: 2046 Basic stats: COMPLETE Column stats: NONE
             Execution mode: vectorized, llap
             LLAP IO: all inputs
@@ -8857,7 +8857,7 @@ STAGE PLANS:
                         Reduce Sink Vectorization:
                             className: VectorReduceSinkLongOperator
                             native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
             Execution mode: vectorized, llap
             LLAP IO: all inputs
@@ -9011,7 +9011,7 @@ STAGE PLANS:
                     Reduce Sink Vectorization:
                         className: VectorReduceSinkLongOperator
                         native: true
-                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                     Statistics: Num rows: 22 Data size: 2046 Basic stats: COMPLETE Column stats: NONE
             Execution mode: vectorized, llap
             LLAP IO: all inputs
@@ -9062,7 +9062,7 @@ STAGE PLANS:
                         Reduce Sink Vectorization:
                             className: VectorReduceSinkLongOperator
                             native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
             Execution mode: vectorized, llap
             LLAP IO: all inputs
@@ -9094,7 +9094,7 @@ STAGE PLANS:
                     Reduce Sink Vectorization:
                         className: VectorReduceSinkLongOperator
                         native: true
-                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                     Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
             Execution mode: vectorized, llap
             LLAP IO: all inputs
@@ -9248,7 +9248,7 @@ STAGE PLANS:
                     Reduce Sink Vectorization:
                         className: VectorReduceSinkLongOperator
                         native: true
-                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                     Statistics: Num rows: 22 Data size: 2046 Basic stats: COMPLETE Column stats: NONE
             Execution mode: vectorized, llap
             LLAP IO: all inputs
@@ -9299,7 +9299,7 @@ STAGE PLANS:
                         Reduce Sink Vectorization:
                             className: VectorReduceSinkLongOperator
                             native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
             Execution mode: vectorized, llap
             LLAP IO: all inputs
@@ -9331,7 +9331,7 @@ STAGE PLANS:
                     Reduce Sink Vectorization:
                         className: VectorReduceSinkLongOperator
                         native: true
-                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                     Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
             Execution mode: vectorized, llap
             LLAP IO: all inputs
@@ -9487,7 +9487,7 @@ STAGE PLANS:
                     Reduce Sink Vectorization:
                         className: VectorReduceSinkLongOperator
                         native: true
-                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                     Statistics: Num rows: 22 Data size: 2046 Basic stats: COMPLETE Column stats: NONE
             Execution mode: vectorized, llap
             LLAP IO: all inputs
@@ -9538,7 +9538,7 @@ STAGE PLANS:
                         Reduce Sink Vectorization:
                             className: VectorReduceSinkLongOperator
                             native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
             Execution mode: vectorized, llap
             LLAP IO: all inputs
@@ -9570,7 +9570,7 @@ STAGE PLANS:
                     Reduce Sink Vectorization:
                         className: VectorReduceSinkLongOperator
                         native: true
-                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                     Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
             Execution mode: vectorized, llap
             LLAP IO: all inputs
@@ -9773,7 +9773,7 @@ STAGE PLANS:
                           Reduce Sink Vectorization:
                               className: VectorReduceSinkObjectHashOperator
                               native: true
-                              nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                              nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                           Statistics: Num rows: 26 Data size: 2475 Basic stats: COMPLETE Column stats: NONE
             Execution mode: vectorized, llap
             LLAP IO: all inputs
@@ -9831,7 +9831,7 @@ STAGE PLANS:
                           Reduce Sink Vectorization:
                               className: VectorReduceSinkLongOperator
                               native: true
-                              nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                              nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                           Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
             Execution mode: vectorized, llap
             LLAP IO: all inputs
@@ -9863,7 +9863,7 @@ STAGE PLANS:
                     Reduce Sink Vectorization:
                         className: VectorReduceSinkStringOperator
                         native: true
-                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                     Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
             Execution mode: vectorized, llap
             LLAP IO: all inputs
@@ -10094,7 +10094,7 @@ STAGE PLANS:
                           Reduce Sink Vectorization:
                               className: VectorReduceSinkStringOperator
                               native: true
-                              nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                              nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                           Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
             Execution mode: vectorized, llap
             LLAP IO: all inputs
@@ -10190,7 +10190,7 @@ STAGE PLANS:
                         Reduce Sink Vectorization:
                             className: VectorReduceSinkObjectHashOperator
                             native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 12 Data size: 1125 Basic stats: COMPLETE Column stats: NONE
             Execution mode: vectorized, llap
             LLAP IO: all inputs
@@ -10248,7 +10248,7 @@ STAGE PLANS:
                           Reduce Sink Vectorization:
                               className: VectorReduceSinkLongOperator
                               native: true
-                              nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                              nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                           Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
             Execution mode: vectorized, llap
             LLAP IO: all inputs
@@ -10384,7 +10384,7 @@ STAGE PLANS:
                         Reduce Sink Vectorization:
                             className: VectorReduceSinkObjectHashOperator
                             native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 12 Data size: 1125 Basic stats: COMPLETE Column stats: NONE
             Execution mode: vectorized, llap
             LLAP IO: all inputs
@@ -10442,7 +10442,7 @@ STAGE PLANS:
                           Reduce Sink Vectorization:
                               className: VectorReduceSinkLongOperator
                               native: true
-                              nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                              nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS 

<TRUNCATED>

[10/50] [abbrv] hive git commit: HIVE-16267 : Enable bootstrap function metadata to be loaded in repl load (Anishek Agarwal, reviewed by Sushanth Sowmyan)

Posted by we...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/9e9356b5/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/io/PartitionSerializer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/io/PartitionSerializer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/io/PartitionSerializer.java
new file mode 100644
index 0000000..077d39b
--- /dev/null
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/io/PartitionSerializer.java
@@ -0,0 +1,65 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.ql.parse.repl.dump.io;
+
+import org.apache.hadoop.hive.metastore.api.Partition;
+import org.apache.hadoop.hive.ql.ErrorMsg;
+import org.apache.hadoop.hive.ql.parse.ReplicationSpec;
+import org.apache.hadoop.hive.ql.parse.SemanticException;
+import org.apache.thrift.TException;
+import org.apache.thrift.TSerializer;
+import org.apache.thrift.protocol.TJSONProtocol;
+
+import java.io.IOException;
+import java.util.Map;
+
+public class PartitionSerializer implements JsonWriter.Serializer {
+  public static final String FIELD_NAME="partitions";
+  private Partition partition;
+
+  PartitionSerializer(Partition partition) {
+    this.partition = partition;
+  }
+
+  @Override
+  public void writeTo(JsonWriter writer, ReplicationSpec additionalPropertiesProvider)
+      throws SemanticException, IOException {
+    TSerializer serializer = new TSerializer(new TJSONProtocol.Factory());
+    try {
+      if (additionalPropertiesProvider.isInReplicationScope()) {
+        partition.putToParameters(
+            ReplicationSpec.KEY.CURR_STATE_ID.toString(),
+            additionalPropertiesProvider.getCurrentReplicationState());
+        if (isPartitionExternal()) {
+          // Replication destination will not be external
+          partition.putToParameters("EXTERNAL", "FALSE");
+        }
+      }
+      writer.jsonGenerator.writeString(serializer.toString(partition, UTF_8));
+      writer.jsonGenerator.flush();
+    } catch (TException e) {
+      throw new SemanticException(ErrorMsg.ERROR_SERIALIZE_METASTORE.getMsg(), e);
+    }
+  }
+
+  private boolean isPartitionExternal() {
+    Map<String, String> params = partition.getParameters();
+    return params.containsKey("EXTERNAL")
+        && params.get("EXTERNAL").equalsIgnoreCase("TRUE");
+  }
+}

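A minimal sketch of the JsonWriter.Serializer contract that PartitionSerializer above implements, assuming only the interface signature and the writer.jsonGenerator field visible in this diff; the class name ExampleSerializer and the choice of a metastore Database payload are illustrative assumptions, not part of this patch:

    package org.apache.hadoop.hive.ql.parse.repl.dump.io;

    import org.apache.hadoop.hive.metastore.api.Database;
    import org.apache.hadoop.hive.ql.ErrorMsg;
    import org.apache.hadoop.hive.ql.parse.ReplicationSpec;
    import org.apache.hadoop.hive.ql.parse.SemanticException;
    import org.apache.thrift.TException;
    import org.apache.thrift.TSerializer;
    import org.apache.thrift.protocol.TJSONProtocol;

    import java.io.IOException;

    // Hypothetical example, not part of this commit: renders a Thrift metastore object
    // to JSON with TSerializer/TJSONProtocol, mirroring the PartitionSerializer pattern.
    public class ExampleSerializer implements JsonWriter.Serializer {
      private final Database database;

      ExampleSerializer(Database database) {
        this.database = database;
      }

      @Override
      public void writeTo(JsonWriter writer, ReplicationSpec additionalPropertiesProvider)
          throws SemanticException, IOException {
        TSerializer serializer = new TSerializer(new TJSONProtocol.Factory());
        try {
          // Thrift object -> JSON string, emitted through the shared JsonWriter's generator.
          writer.jsonGenerator.writeString(serializer.toString(database, "UTF-8"));
        } catch (TException e) {
          throw new SemanticException(ErrorMsg.ERROR_SERIALIZE_METASTORE.getMsg(), e);
        }
      }
    }
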
http://git-wip-us.apache.org/repos/asf/hive/blob/9e9356b5/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/io/ReplicationSpecSerializer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/io/ReplicationSpecSerializer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/io/ReplicationSpecSerializer.java
new file mode 100644
index 0000000..3a92e8a
--- /dev/null
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/io/ReplicationSpecSerializer.java
@@ -0,0 +1,36 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.ql.parse.repl.dump.io;
+
+import org.apache.hadoop.hive.ql.parse.ReplicationSpec;
+import org.apache.hadoop.hive.ql.parse.SemanticException;
+
+import java.io.IOException;
+
+public class ReplicationSpecSerializer implements JsonWriter.Serializer {
+  @Override
+  public void writeTo(JsonWriter writer, ReplicationSpec additionalPropertiesProvider)
+      throws SemanticException, IOException {
+    for (ReplicationSpec.KEY key : ReplicationSpec.KEY.values()) {
+      String value = additionalPropertiesProvider.get(key);
+      if (value != null) {
+        writer.jsonGenerator.writeStringField(key.toString(), value);
+      }
+    }
+  }
+}

http://git-wip-us.apache.org/repos/asf/hive/blob/9e9356b5/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/io/TableSerializer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/io/TableSerializer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/io/TableSerializer.java
new file mode 100644
index 0000000..948cb39
--- /dev/null
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/io/TableSerializer.java
@@ -0,0 +1,114 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.ql.parse.repl.dump.io;
+
+import org.apache.hadoop.hive.metastore.TableType;
+import org.apache.hadoop.hive.metastore.api.Table;
+import org.apache.hadoop.hive.ql.ErrorMsg;
+import org.apache.hadoop.hive.ql.metadata.Partition;
+import org.apache.hadoop.hive.ql.parse.ReplicationSpec;
+import org.apache.hadoop.hive.ql.parse.SemanticException;
+import org.apache.thrift.TException;
+import org.apache.thrift.TSerializer;
+import org.apache.thrift.protocol.TJSONProtocol;
+
+import java.io.IOException;
+import java.util.Map;
+
+public class TableSerializer implements JsonWriter.Serializer {
+  public static final String FIELD_NAME = "table";
+  private final org.apache.hadoop.hive.ql.metadata.Table tableHandle;
+  private final Iterable<Partition> partitions;
+
+  public TableSerializer(org.apache.hadoop.hive.ql.metadata.Table tableHandle,
+      Iterable<Partition> partitions) {
+    this.tableHandle = tableHandle;
+    this.partitions = partitions;
+  }
+
+  @Override
+  public void writeTo(JsonWriter writer, ReplicationSpec additionalPropertiesProvider)
+      throws SemanticException, IOException {
+    if (cannotReplicateTable(additionalPropertiesProvider)) {
+      return;
+    }
+
+    Table tTable = tableHandle.getTTable();
+    tTable = addPropertiesToTable(tTable, additionalPropertiesProvider);
+    try {
+      TSerializer serializer = new TSerializer(new TJSONProtocol.Factory());
+      writer.jsonGenerator
+          .writeStringField(FIELD_NAME, serializer.toString(tTable, UTF_8));
+      writer.jsonGenerator.writeFieldName(PartitionSerializer.FIELD_NAME);
+      writePartitions(writer, additionalPropertiesProvider);
+    } catch (TException e) {
+      throw new SemanticException(ErrorMsg.ERROR_SERIALIZE_METASTORE.getMsg(), e);
+    }
+  }
+
+  private boolean cannotReplicateTable(ReplicationSpec additionalPropertiesProvider) {
+    return tableHandle == null || additionalPropertiesProvider.isNoop();
+  }
+
+  private Table addPropertiesToTable(Table table, ReplicationSpec additionalPropertiesProvider)
+      throws SemanticException, IOException {
+    if (additionalPropertiesProvider.isInReplicationScope()) {
+      table.putToParameters(
+            ReplicationSpec.KEY.CURR_STATE_ID.toString(),
+            additionalPropertiesProvider.getCurrentReplicationState());
+      if (isExternalTable(table)) {
+        // Replication destination will not be external - override if set
+        table.putToParameters("EXTERNAL", "FALSE");
+      }
+      if (isExternalTableType(table)) {
+        // Replication dest will not be external - override if set
+        table.setTableType(TableType.MANAGED_TABLE.toString());
+      }
+    } else {
+      // ReplicationSpec.KEY scopeKey = ReplicationSpec.KEY.REPL_SCOPE;
+      // write(out, ",\""+ scopeKey.toString() +"\":\"" + replicationSpec.get(scopeKey) + "\"");
+      // TODO: if we want to be explicit about this dump not being a replication dump, we can
+      // uncomment this else section, but currently unneeded. Will require a lot of golden file
+      // regen if we do so.
+    }
+    return table;
+  }
+
+  private boolean isExternalTableType(org.apache.hadoop.hive.metastore.api.Table table) {
+    return table.isSetTableType()
+        && table.getTableType().equalsIgnoreCase(TableType.EXTERNAL_TABLE.toString());
+  }
+
+  private boolean isExternalTable(org.apache.hadoop.hive.metastore.api.Table table) {
+    Map<String, String> params = table.getParameters();
+    return params.containsKey("EXTERNAL")
+        && params.get("EXTERNAL").equalsIgnoreCase("TRUE");
+  }
+
+  private void writePartitions(JsonWriter writer, ReplicationSpec additionalPropertiesProvider)
+      throws SemanticException, IOException {
+    writer.jsonGenerator.writeStartArray();
+    if (partitions != null) {
+      for (org.apache.hadoop.hive.ql.metadata.Partition partition : partitions) {
+        new PartitionSerializer(partition.getTPartition())
+            .writeTo(writer, additionalPropertiesProvider);
+      }
+    }
+    writer.jsonGenerator.writeEndArray();
+  }
+}

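The replication-scope branch in TableSerializer.addPropertiesToTable above forces a dumped table to look managed on the destination: the EXTERNAL parameter is rewritten to FALSE and an EXTERNAL_TABLE table type becomes MANAGED_TABLE. A small standalone illustration of that rewrite on a Thrift metastore Table, assuming only the metastore API types named in the diff (the class ExternalOverrideExample is hypothetical):

    import org.apache.hadoop.hive.metastore.TableType;
    import org.apache.hadoop.hive.metastore.api.Table;

    public class ExternalOverrideExample {
      public static void main(String[] args) {
        Table table = new Table();                                // Thrift-generated metastore table
        table.putToParameters("EXTERNAL", "TRUE");                // source table is external
        table.setTableType(TableType.EXTERNAL_TABLE.toString());

        // The same override the dump applies when in replication scope.
        table.putToParameters("EXTERNAL", "FALSE");
        table.setTableType(TableType.MANAGED_TABLE.toString());

        System.out.println(table.getParameters().get("EXTERNAL")); // FALSE
        System.out.println(table.getTableType());                  // MANAGED_TABLE
      }
    }
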
http://git-wip-us.apache.org/repos/asf/hive/blob/9e9356b5/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/io/VersionCompatibleSerializer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/io/VersionCompatibleSerializer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/io/VersionCompatibleSerializer.java
new file mode 100644
index 0000000..8201173
--- /dev/null
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/io/VersionCompatibleSerializer.java
@@ -0,0 +1,37 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.ql.parse.repl.dump.io;
+
+import org.apache.hadoop.hive.ql.parse.ReplicationSpec;
+import org.apache.hadoop.hive.ql.parse.SemanticException;
+
+import java.io.IOException;
+
+import static org.apache.hadoop.hive.ql.parse.EximUtil.METADATA_FORMAT_FORWARD_COMPATIBLE_VERSION;
+
+/**
+ * This is not used as of now because the conditional which led to its usage is always false;
+ * hence we have removed the conditional and the usage of this class, but it might be required in the future.
+ */
+public class VersionCompatibleSerializer implements JsonWriter.Serializer {
+  @Override
+  public void writeTo(JsonWriter writer, ReplicationSpec additionalPropertiesProvider)
+      throws SemanticException, IOException {
+    writer.jsonGenerator.writeStringField("fcversion", METADATA_FORMAT_FORWARD_COMPATIBLE_VERSION);
+  }
+}

http://git-wip-us.apache.org/repos/asf/hive/blob/9e9356b5/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/events/AddPartitionHandler.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/events/AddPartitionHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/events/AddPartitionHandler.java
index 9a4f8b9..1616ab9 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/events/AddPartitionHandler.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/events/AddPartitionHandler.java
@@ -35,7 +35,7 @@ import java.io.IOException;
 import java.io.OutputStreamWriter;
 import java.util.Iterator;
 
-import static org.apache.hadoop.hive.ql.parse.ReplicationSemanticAnalyzer.DUMPTYPE;
+import org.apache.hadoop.hive.ql.parse.repl.DumpType;
 
 public class AddPartitionHandler extends AbstractHandler {
   protected AddPartitionHandler(NotificationEvent notificationEvent) {
@@ -108,7 +108,7 @@ public class AddPartitionHandler extends AbstractHandler {
   }
 
   @Override
-  public DUMPTYPE dumpType() {
-    return DUMPTYPE.EVENT_ADD_PARTITION;
+  public DumpType dumpType() {
+    return DumpType.EVENT_ADD_PARTITION;
   }
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/9e9356b5/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/events/AlterPartitionHandler.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/events/AlterPartitionHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/events/AlterPartitionHandler.java
index 20d04dc..b6c3496 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/events/AlterPartitionHandler.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/events/AlterPartitionHandler.java
@@ -23,14 +23,14 @@ import org.apache.hadoop.hive.metastore.messaging.AlterPartitionMessage;
 import org.apache.hadoop.hive.ql.metadata.Partition;
 import org.apache.hadoop.hive.ql.metadata.Table;
 import org.apache.hadoop.hive.ql.parse.EximUtil;
-import org.apache.hadoop.hive.ql.parse.SemanticException;
 
 import java.util.ArrayList;
 import java.util.Iterator;
 import java.util.List;
 
-import static org.apache.hadoop.hive.ql.parse.ReplicationSemanticAnalyzer.DUMPTYPE;
-import static org.apache.hadoop.hive.ql.parse.ReplicationSemanticAnalyzer.DumpMetaData;
+import org.apache.hadoop.hive.ql.parse.repl.DumpType;
+
+import org.apache.hadoop.hive.ql.parse.repl.load.DumpMetaData;
 
 public class AlterPartitionHandler extends AbstractHandler {
   private final org.apache.hadoop.hive.metastore.api.Partition after;
@@ -51,24 +51,24 @@ public class AlterPartitionHandler extends AbstractHandler {
   private enum Scenario {
     ALTER {
       @Override
-      DUMPTYPE dumpType() {
-        return DUMPTYPE.EVENT_ALTER_PARTITION;
+      DumpType dumpType() {
+        return DumpType.EVENT_ALTER_PARTITION;
       }
     },
     RENAME {
       @Override
-      DUMPTYPE dumpType() {
-        return DUMPTYPE.EVENT_RENAME_PARTITION;
+      DumpType dumpType() {
+        return DumpType.EVENT_RENAME_PARTITION;
       }
     },
     TRUNCATE {
       @Override
-      DUMPTYPE dumpType() {
-        return DUMPTYPE.EVENT_TRUNCATE_PARTITION;
+      DumpType dumpType() {
+        return DumpType.EVENT_TRUNCATE_PARTITION;
       }
     };
 
-    abstract DUMPTYPE dumpType();
+    abstract DumpType dumpType();
   }
 
   private Scenario scenarioType(org.apache.hadoop.hive.metastore.api.Partition before,
@@ -90,14 +90,14 @@ public class AlterPartitionHandler extends AbstractHandler {
     if (Scenario.ALTER == scenario) {
       withinContext.replicationSpec.setIsMetadataOnly(true);
       Table qlMdTable = new Table(tableObject);
-      List<Partition> qlPtns = new ArrayList<>();
-      qlPtns.add(new Partition(qlMdTable, after));
+      List<Partition> partitions = new ArrayList<>();
+      partitions.add(new Partition(qlMdTable, after));
       Path metaDataPath = new Path(withinContext.eventRoot, EximUtil.METADATA_NAME);
       EximUtil.createExportDump(
           metaDataPath.getFileSystem(withinContext.hiveConf),
           metaDataPath,
           qlMdTable,
-          qlPtns,
+          partitions,
           withinContext.replicationSpec);
     }
     DumpMetaData dmd = withinContext.createDmd(this);
@@ -106,7 +106,7 @@ public class AlterPartitionHandler extends AbstractHandler {
   }
 
   @Override
-  public DUMPTYPE dumpType() {
+  public DumpType dumpType() {
     return scenario.dumpType();
   }
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/9e9356b5/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/events/AlterTableHandler.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/events/AlterTableHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/events/AlterTableHandler.java
index bfe0181..d553240 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/events/AlterTableHandler.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/events/AlterTableHandler.java
@@ -22,13 +22,12 @@ import org.apache.hadoop.hive.metastore.api.NotificationEvent;
 import org.apache.hadoop.hive.metastore.messaging.AlterTableMessage;
 import org.apache.hadoop.hive.ql.metadata.Table;
 import org.apache.hadoop.hive.ql.parse.EximUtil;
-import org.apache.hadoop.hive.ql.parse.SemanticException;
 
-import static org.apache.hadoop.hive.ql.parse.ReplicationSemanticAnalyzer.DUMPTYPE;
-import static org.apache.hadoop.hive.ql.parse.ReplicationSemanticAnalyzer.DumpMetaData;
+import org.apache.hadoop.hive.ql.parse.repl.DumpType;
+
+import org.apache.hadoop.hive.ql.parse.repl.load.DumpMetaData;
 
 public class AlterTableHandler extends AbstractHandler {
-  private final org.apache.hadoop.hive.metastore.api.Table before;
   private final org.apache.hadoop.hive.metastore.api.Table after;
   private final boolean isTruncateOp;
   private final Scenario scenario;
@@ -36,30 +35,30 @@ public class AlterTableHandler extends AbstractHandler {
   private enum Scenario {
     ALTER {
       @Override
-      DUMPTYPE dumpType() {
-        return DUMPTYPE.EVENT_ALTER_TABLE;
+      DumpType dumpType() {
+        return DumpType.EVENT_ALTER_TABLE;
       }
     },
     RENAME {
       @Override
-      DUMPTYPE dumpType() {
-        return DUMPTYPE.EVENT_RENAME_TABLE;
+      DumpType dumpType() {
+        return DumpType.EVENT_RENAME_TABLE;
       }
     },
     TRUNCATE {
       @Override
-      DUMPTYPE dumpType() {
-        return DUMPTYPE.EVENT_TRUNCATE_TABLE;
+      DumpType dumpType() {
+        return DumpType.EVENT_TRUNCATE_TABLE;
       }
     };
 
-    abstract DUMPTYPE dumpType();
+    abstract DumpType dumpType();
   }
 
   AlterTableHandler(NotificationEvent event) throws Exception {
     super(event);
     AlterTableMessage atm = deserializer.getAlterTableMessage(event.getMessage());
-    before = atm.getTableObjBefore();
+    org.apache.hadoop.hive.metastore.api.Table before = atm.getTableObjBefore();
     after = atm.getTableObjAfter();
     isTruncateOp = atm.getIsTruncateOp();
     scenario = scenarioType(before, after);
@@ -97,7 +96,7 @@ public class AlterTableHandler extends AbstractHandler {
   }
 
   @Override
-  public DUMPTYPE dumpType() {
+  public DumpType dumpType() {
     return scenario.dumpType();
   }
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/9e9356b5/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/events/CreateTableHandler.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/events/CreateTableHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/events/CreateTableHandler.java
index 03f400d..88600fd 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/events/CreateTableHandler.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/events/CreateTableHandler.java
@@ -28,7 +28,7 @@ import java.io.BufferedWriter;
 import java.io.IOException;
 import java.io.OutputStreamWriter;
 
-import static org.apache.hadoop.hive.ql.parse.ReplicationSemanticAnalyzer.DUMPTYPE;
+import org.apache.hadoop.hive.ql.parse.repl.DumpType;
 
 public class CreateTableHandler extends AbstractHandler {
 
@@ -80,7 +80,7 @@ public class CreateTableHandler extends AbstractHandler {
   }
 
   @Override
-  public DUMPTYPE dumpType() {
-    return DUMPTYPE.EVENT_CREATE_TABLE;
+  public DumpType dumpType() {
+    return DumpType.EVENT_CREATE_TABLE;
   }
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/9e9356b5/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/events/DefaultHandler.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/events/DefaultHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/events/DefaultHandler.java
index 61c5f37..78cd74f 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/events/DefaultHandler.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/events/DefaultHandler.java
@@ -19,8 +19,9 @@ package org.apache.hadoop.hive.ql.parse.repl.events;
 
 import org.apache.hadoop.hive.metastore.api.NotificationEvent;
 
-import static org.apache.hadoop.hive.ql.parse.ReplicationSemanticAnalyzer.DUMPTYPE;
-import static org.apache.hadoop.hive.ql.parse.ReplicationSemanticAnalyzer.DumpMetaData;
+import org.apache.hadoop.hive.ql.parse.repl.DumpType;
+
+import org.apache.hadoop.hive.ql.parse.repl.load.DumpMetaData;
 
 public class DefaultHandler extends AbstractHandler {
 
@@ -37,7 +38,7 @@ public class DefaultHandler extends AbstractHandler {
   }
 
   @Override
-  public DUMPTYPE dumpType() {
-    return DUMPTYPE.EVENT_UNKNOWN;
+  public DumpType dumpType() {
+    return DumpType.EVENT_UNKNOWN;
   }
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/9e9356b5/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/events/DropPartitionHandler.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/events/DropPartitionHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/events/DropPartitionHandler.java
index 3ad794e..c4a0908 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/events/DropPartitionHandler.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/events/DropPartitionHandler.java
@@ -19,8 +19,9 @@ package org.apache.hadoop.hive.ql.parse.repl.events;
 
 import org.apache.hadoop.hive.metastore.api.NotificationEvent;
 
-import static org.apache.hadoop.hive.ql.parse.ReplicationSemanticAnalyzer.DUMPTYPE;
-import static org.apache.hadoop.hive.ql.parse.ReplicationSemanticAnalyzer.DumpMetaData;
+import org.apache.hadoop.hive.ql.parse.repl.DumpType;
+
+import org.apache.hadoop.hive.ql.parse.repl.load.DumpMetaData;
 
 public class DropPartitionHandler extends AbstractHandler {
 
@@ -37,7 +38,7 @@ public class DropPartitionHandler extends AbstractHandler {
   }
 
   @Override
-  public DUMPTYPE dumpType() {
-    return DUMPTYPE.EVENT_DROP_PARTITION;
+  public DumpType dumpType() {
+    return DumpType.EVENT_DROP_PARTITION;
   }
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/9e9356b5/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/events/DropTableHandler.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/events/DropTableHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/events/DropTableHandler.java
index cae379b..e3addaf 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/events/DropTableHandler.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/events/DropTableHandler.java
@@ -19,8 +19,9 @@ package org.apache.hadoop.hive.ql.parse.repl.events;
 
 import org.apache.hadoop.hive.metastore.api.NotificationEvent;
 
-import static org.apache.hadoop.hive.ql.parse.ReplicationSemanticAnalyzer.DUMPTYPE;
-import static org.apache.hadoop.hive.ql.parse.ReplicationSemanticAnalyzer.DumpMetaData;
+import org.apache.hadoop.hive.ql.parse.repl.DumpType;
+
+import org.apache.hadoop.hive.ql.parse.repl.load.DumpMetaData;
 
 public class DropTableHandler extends AbstractHandler {
 
@@ -37,7 +38,7 @@ public class DropTableHandler extends AbstractHandler {
   }
 
   @Override
-  public DUMPTYPE dumpType() {
-    return DUMPTYPE.EVENT_DROP_TABLE;
+  public DumpType dumpType() {
+    return DumpType.EVENT_DROP_TABLE;
   }
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/9e9356b5/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/events/EventHandler.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/events/EventHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/events/EventHandler.java
index 199145a..29f3b42 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/events/EventHandler.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/events/EventHandler.java
@@ -22,8 +22,8 @@ import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.ql.metadata.Hive;
 import org.apache.hadoop.hive.ql.parse.ReplicationSpec;
 
-import static org.apache.hadoop.hive.ql.parse.ReplicationSemanticAnalyzer.DumpMetaData;
-import static org.apache.hadoop.hive.ql.parse.ReplicationSemanticAnalyzer.DUMPTYPE;
+import org.apache.hadoop.hive.ql.parse.repl.load.DumpMetaData;
+import org.apache.hadoop.hive.ql.parse.repl.DumpType;
 
 public interface EventHandler {
   void handle(Context withinContext) throws Exception;
@@ -32,7 +32,7 @@ public interface EventHandler {
 
   long toEventId();
 
-  DUMPTYPE dumpType();
+  DumpType dumpType();
 
   class Context {
     final Path eventRoot, cmRoot;
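
Taken together with the handlers above, the contract is small: a caller hands each notification event to a handler, lets it serialize whatever it needs under Context.eventRoot, and then creates the per-event dump metadata from the handler (withinContext.createDmd(this) in the hunks above). A rough sketch of that call sequence; handlerFor below is only a stand-in for the factory exercised by TestEventHandlerFactory further down, whose real signature is not part of this diff:

// Illustrative only; handlerFor(...) is a placeholder for the real factory lookup.
void dumpOneEvent(NotificationEvent event, EventHandler.Context withinContext) throws Exception {
  EventHandler handler = handlerFor(event);
  handler.handle(withinContext);             // serializes event data under withinContext.eventRoot
  DumpType type = handler.dumpType();        // e.g. EVENT_ALTER_TABLE
  long lastEventId = handler.toEventId();    // last event id covered by this handler
}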

http://git-wip-us.apache.org/repos/asf/hive/blob/9e9356b5/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/events/InsertHandler.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/events/InsertHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/events/InsertHandler.java
index e9f2a6a..910b396 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/events/InsertHandler.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/events/InsertHandler.java
@@ -32,8 +32,9 @@ import java.util.Collections;
 import java.util.List;
 import java.util.Map;
 
-import static org.apache.hadoop.hive.ql.parse.ReplicationSemanticAnalyzer.DUMPTYPE;
-import static org.apache.hadoop.hive.ql.parse.ReplicationSemanticAnalyzer.DumpMetaData;
+import org.apache.hadoop.hive.ql.parse.repl.DumpType;
+
+import org.apache.hadoop.hive.ql.parse.repl.load.DumpMetaData;
 
 public class InsertHandler extends AbstractHandler {
 
@@ -103,7 +104,7 @@ public class InsertHandler extends AbstractHandler {
   }
 
   @Override
-  public DUMPTYPE dumpType() {
-    return DUMPTYPE.EVENT_INSERT;
+  public DumpType dumpType() {
+    return DumpType.EVENT_INSERT;
   }
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/9e9356b5/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/DumpMetaData.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/DumpMetaData.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/DumpMetaData.java
new file mode 100644
index 0000000..12ad19b
--- /dev/null
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/DumpMetaData.java
@@ -0,0 +1,143 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.ql.parse.repl.load;
+
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.metastore.ReplChangeManager;
+import org.apache.hadoop.hive.ql.exec.Utilities;
+import org.apache.hadoop.hive.ql.parse.SemanticException;
+import org.apache.hadoop.hive.ql.parse.repl.dump.Utils;
+
+import org.apache.hadoop.hive.ql.parse.repl.DumpType;
+
+import java.io.BufferedReader;
+import java.io.IOException;
+import java.io.InputStreamReader;
+import java.util.Arrays;
+
+public class DumpMetaData {
+  // wrapper class for reading and writing metadata about a dump
+  // responsible for _dumpmetadata files
+  private static final String DUMP_METADATA = "_dumpmetadata";
+
+  private DumpType dumpType;
+  private Long eventFrom = null;
+  private Long eventTo = null;
+  private String payload = null;
+  private boolean initialized = false;
+
+  private final Path dumpFile;
+  private final HiveConf hiveConf;
+  private Path cmRoot;
+
+  public DumpMetaData(Path dumpRoot, HiveConf hiveConf) {
+    this.hiveConf = hiveConf;
+    dumpFile = new Path(dumpRoot, DUMP_METADATA);
+  }
+
+  public DumpMetaData(Path dumpRoot, DumpType lvl, Long eventFrom, Long eventTo, Path cmRoot,
+      HiveConf hiveConf) {
+    this(dumpRoot, hiveConf);
+    setDump(lvl, eventFrom, eventTo, cmRoot);
+  }
+
+  public void setDump(DumpType lvl, Long eventFrom, Long eventTo, Path cmRoot) {
+    this.dumpType = lvl;
+    this.eventFrom = eventFrom;
+    this.eventTo = eventTo;
+    this.initialized = true;
+    this.cmRoot = cmRoot;
+  }
+
+  private void loadDumpFromFile() throws SemanticException {
+    try {
+      // read from dumpfile and instantiate self
+      FileSystem fs = dumpFile.getFileSystem(hiveConf);
+      BufferedReader br = new BufferedReader(new InputStreamReader(fs.open(dumpFile)));
+      String line = null;
+      if ((line = br.readLine()) != null) {
+        String[] lineContents = line.split("\t", 5);
+        setDump(DumpType.valueOf(lineContents[0]), Long.valueOf(lineContents[1]),
+            Long.valueOf(lineContents[2]),
+            new Path(lineContents[3]));
+        setPayload(lineContents[4].equals(Utilities.nullStringOutput) ? null : lineContents[4]);
+        ReplChangeManager.setCmRoot(cmRoot);
+      } else {
+        throw new IOException(
+            "Unable to read valid values from dumpFile:" + dumpFile.toUri().toString());
+      }
+    } catch (IOException ioe) {
+      throw new SemanticException(ioe);
+    }
+  }
+
+  public DumpType getDumpType() throws SemanticException {
+    initializeIfNot();
+    return this.dumpType;
+  }
+
+  public String getPayload() throws SemanticException {
+    initializeIfNot();
+    return this.payload;
+  }
+
+  public void setPayload(String payload) {
+    this.payload = payload;
+  }
+
+  public Long getEventFrom() throws SemanticException {
+    initializeIfNot();
+    return eventFrom;
+  }
+
+  public Long getEventTo() throws SemanticException {
+    initializeIfNot();
+    return eventTo;
+  }
+
+  public Path getDumpFilePath() {
+    return dumpFile;
+  }
+
+  public boolean isIncrementalDump() throws SemanticException {
+    initializeIfNot();
+    return (this.dumpType == DumpType.INCREMENTAL);
+  }
+
+  private void initializeIfNot() throws SemanticException {
+    if (!initialized) {
+      loadDumpFromFile();
+    }
+  }
+
+
+  public void write() throws SemanticException {
+    Utils.writeOutput(
+        Arrays.asList(
+            dumpType.toString(),
+            eventFrom.toString(),
+            eventTo.toString(),
+            cmRoot.toString(),
+            payload),
+        dumpFile,
+        hiveConf
+    );
+  }
+}
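
The new class is intentionally thin: it persists the dump type, the event-id window, the cmRoot and an optional payload as a single line (read back as five tab-separated fields) via Utils.writeOutput, and re-reads that line lazily on the first getter call. A minimal round trip, under assumed paths and an assumed event window, would look like:

// Sketch only: the paths and the 100-200 event window are made up for illustration.
void roundTrip(HiveConf conf) throws SemanticException {
  Path dumpRoot = new Path("/tmp/repl/dump1");
  Path cmRoot = new Path("/tmp/repl/cmroot");

  // Write the _dumpmetadata file for an incremental dump covering events 100-200.
  new DumpMetaData(dumpRoot, DumpType.INCREMENTAL, 100L, 200L, cmRoot, conf).write();

  // Read it back; loadDumpFromFile() runs lazily on the first accessor call.
  DumpMetaData dmd = new DumpMetaData(dumpRoot, conf);
  boolean incremental = dmd.isIncrementalDump();  // true
  Long from = dmd.getEventFrom();                 // 100
  Long to = dmd.getEventTo();                     // 200
}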

http://git-wip-us.apache.org/repos/asf/hive/blob/9e9356b5/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/MetaData.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/MetaData.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/MetaData.java
new file mode 100644
index 0000000..fc02dfd
--- /dev/null
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/MetaData.java
@@ -0,0 +1,64 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.ql.parse.repl.load;
+
+import org.apache.hadoop.hive.metastore.api.Database;
+import org.apache.hadoop.hive.metastore.api.Function;
+import org.apache.hadoop.hive.metastore.api.Partition;
+import org.apache.hadoop.hive.metastore.api.Table;
+import org.apache.hadoop.hive.ql.parse.ReplicationSpec;
+
+/**
+ * Utility class to help return complex value from readMetaData function
+ */
+public class MetaData {
+  private final Database db;
+  private final Table table;
+  private final Iterable<Partition> partitions;
+  private final ReplicationSpec replicationSpec;
+  public final Function function;
+
+  public MetaData() {
+    this(null, null, null, new ReplicationSpec(), null);
+  }
+
+  MetaData(Database db, Table table, Iterable<Partition> partitions,
+      ReplicationSpec replicationSpec, Function function) {
+    this.db = db;
+    this.table = table;
+    this.partitions = partitions;
+    this.replicationSpec = replicationSpec;
+    this.function = function;
+  }
+
+  public Database getDatabase() {
+    return db;
+  }
+
+  public Table getTable() {
+    return table;
+  }
+
+  public Iterable<Partition> getPartitions() {
+    return partitions;
+  }
+
+  public ReplicationSpec getReplicationSpec() {
+    return replicationSpec;
+  }
+}

http://git-wip-us.apache.org/repos/asf/hive/blob/9e9356b5/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/MetadataJson.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/MetadataJson.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/MetadataJson.java
new file mode 100644
index 0000000..b7a5680
--- /dev/null
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/MetadataJson.java
@@ -0,0 +1,128 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.ql.parse.repl.load;
+
+import org.apache.hadoop.hive.metastore.api.Database;
+import org.apache.hadoop.hive.metastore.api.Function;
+import org.apache.hadoop.hive.metastore.api.Partition;
+import org.apache.hadoop.hive.metastore.api.Table;
+import org.apache.hadoop.hive.ql.parse.EximUtil;
+import org.apache.hadoop.hive.ql.parse.ReplicationSpec;
+import org.apache.hadoop.hive.ql.parse.SemanticException;
+import org.apache.hadoop.hive.ql.parse.repl.dump.io.DBSerializer;
+import org.apache.hadoop.hive.ql.parse.repl.dump.io.FunctionSerializer;
+import org.apache.hadoop.hive.ql.parse.repl.dump.io.PartitionSerializer;
+import org.apache.hadoop.hive.ql.parse.repl.dump.io.TableSerializer;
+import org.apache.thrift.TBase;
+import org.apache.thrift.TDeserializer;
+import org.apache.thrift.TException;
+import org.apache.thrift.protocol.TJSONProtocol;
+import org.json.JSONArray;
+import org.json.JSONException;
+import org.json.JSONObject;
+
+import javax.annotation.Nullable;
+import java.util.ArrayList;
+import java.util.List;
+
+import static org.apache.hadoop.hive.ql.parse.repl.dump.io.JsonWriter.Serializer.UTF_8;
+
+public class MetadataJson {
+  private final JSONObject json;
+  private final TDeserializer deserializer;
+  private final String tableDesc;
+
+  public MetadataJson(String message) throws JSONException, SemanticException {
+    deserializer = new TDeserializer(new TJSONProtocol.Factory());
+    json = new JSONObject(message);
+    checkCompatibility();
+    tableDesc = jsonEntry(TableSerializer.FIELD_NAME);
+  }
+
+  public MetaData getMetaData() throws TException, JSONException {
+    return new MetaData(
+        database(),
+        table(),
+        partitions(),
+        readReplicationSpec(),
+        function()
+    );
+  }
+
+  private Function function() throws TException {
+    return deserialize(new Function(), jsonEntry(FunctionSerializer.FIELD_NAME));
+  }
+
+  private Database database() throws TException {
+    return deserialize(new Database(), jsonEntry(DBSerializer.FIELD_NAME));
+  }
+
+  private Table table() throws TException {
+    return deserialize(new Table(), tableDesc);
+  }
+
+  private <T extends TBase> T deserialize(T intoObject, String json) throws TException {
+    if (json == null) {
+      return null;
+    }
+    deserializer.deserialize(intoObject, json, UTF_8);
+    return intoObject;
+  }
+
+  private List<Partition> partitions() throws JSONException, TException {
+    if (tableDesc == null) {
+      return null;
+    }
+    // TODO : jackson-streaming-iterable-redo this
+    JSONArray jsonPartitions = new JSONArray(json.getString(PartitionSerializer.FIELD_NAME));
+    List<Partition> partitionsList = new ArrayList<>(jsonPartitions.length());
+    for (int i = 0; i < jsonPartitions.length(); ++i) {
+      String partDesc = jsonPartitions.getString(i);
+      partitionsList.add(deserialize(new Partition(), partDesc));
+    }
+    return partitionsList;
+  }
+
+  private ReplicationSpec readReplicationSpec() {
+    com.google.common.base.Function<String, String> keyFetcher =
+        new com.google.common.base.Function<String, String>() {
+          @Override
+          public String apply(@Nullable String s) {
+            return jsonEntry(s);
+          }
+        };
+    return new ReplicationSpec(keyFetcher);
+  }
+
+  private void checkCompatibility() throws SemanticException, JSONException {
+    String version = json.getString("version");
+    String fcVersion = jsonEntry("fcversion");
+    EximUtil.doCheckCompatibility(
+        EximUtil.METADATA_FORMAT_VERSION,
+        version,
+        fcVersion);
+  }
+
+  private String jsonEntry(String forName) {
+    try {
+      return json.getString(forName);
+    } catch (JSONException ignored) {
+      return null;
+    }
+  }
+}
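
MetadataJson is the read-side counterpart of the serializers in org.apache.hadoop.hive.ql.parse.repl.dump.io: given the JSON text of a dumped _metadata file it rebuilds the thrift objects and wraps them in the MetaData value class added above. A short sketch of the intended call pattern; the JSON string itself would come from a previous dump and is not reproduced here:

// Sketch only: jsonText is assumed to hold the contents of a dumped _metadata file.
MetaData readDumpedMetadata(String jsonText) throws Exception {
  MetadataJson metadataJson = new MetadataJson(jsonText);    // also checks format compatibility
  MetaData md = metadataJson.getMetaData();
  Table table = md.getTable();                               // null when no table entry is present
  Iterable<Partition> partitions = md.getPartitions();
  ReplicationSpec replicationSpec = md.getReplicationSpec();
  return md;
}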

http://git-wip-us.apache.org/repos/asf/hive/blob/9e9356b5/ql/src/test/org/apache/hadoop/hive/ql/parse/repl/dump/HiveWrapperTest.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/parse/repl/dump/HiveWrapperTest.java b/ql/src/test/org/apache/hadoop/hive/ql/parse/repl/dump/HiveWrapperTest.java
new file mode 100644
index 0000000..3028e76
--- /dev/null
+++ b/ql/src/test/org/apache/hadoop/hive/ql/parse/repl/dump/HiveWrapperTest.java
@@ -0,0 +1,27 @@
+package org.apache.hadoop.hive.ql.parse.repl.dump;
+
+import org.apache.hadoop.hive.metastore.api.Table;
+import org.apache.hadoop.hive.ql.metadata.HiveException;
+import org.apache.hadoop.hive.ql.parse.ReplicationSpec;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.mockito.InOrder;
+import org.mockito.Mock;
+import org.mockito.Mockito;
+import org.mockito.runners.MockitoJUnitRunner;
+
+@RunWith(MockitoJUnitRunner.class)
+public class HiveWrapperTest {
+  @Mock
+  private HiveWrapper.Tuple.Function<ReplicationSpec> specFunction;
+  @Mock
+  private HiveWrapper.Tuple.Function<Table> tableFunction;
+
+  @Test
+  public void replicationIdIsRequestedBeforeObjectDefinition() throws HiveException {
+    new HiveWrapper.Tuple<>(specFunction, tableFunction);
+    InOrder inOrder = Mockito.inOrder(specFunction, tableFunction);
+    inOrder.verify(specFunction).fromMetaStore();
+    inOrder.verify(tableFunction).fromMetaStore();
+  }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/9e9356b5/ql/src/test/org/apache/hadoop/hive/ql/parse/repl/events/TestEventHandlerFactory.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/parse/repl/events/TestEventHandlerFactory.java b/ql/src/test/org/apache/hadoop/hive/ql/parse/repl/events/TestEventHandlerFactory.java
index d44cb79..4b802c4 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/parse/repl/events/TestEventHandlerFactory.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/parse/repl/events/TestEventHandlerFactory.java
@@ -19,7 +19,7 @@
 package org.apache.hadoop.hive.ql.parse.repl.events;
 
 import org.apache.hadoop.hive.metastore.api.NotificationEvent;
-import org.apache.hadoop.hive.ql.parse.ReplicationSemanticAnalyzer;
+import org.apache.hadoop.hive.ql.parse.repl.DumpType;
 import org.junit.Test;
 
 import static org.junit.Assert.assertTrue;
@@ -44,7 +44,7 @@ public class TestEventHandlerFactory {
       }
 
       @Override
-      public ReplicationSemanticAnalyzer.DUMPTYPE dumpType() {
+      public DumpType dumpType() {
         return null;
       }
     }


[38/50] [abbrv] hive git commit: HIVE-14671 : merge master into hive-14535 (Wei Zheng)

Posted by we...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/1ceaf357/ql/src/test/results/clientpositive/mm_all.q.out
----------------------------------------------------------------------
diff --cc ql/src/test/results/clientpositive/mm_all.q.out
index 116f2b1,0000000..db5de69
mode 100644,000000..100644
--- a/ql/src/test/results/clientpositive/mm_all.q.out
+++ b/ql/src/test/results/clientpositive/mm_all.q.out
@@@ -1,3266 -1,0 +1,3166 @@@
 +PREHOOK: query: drop table intermediate
 +PREHOOK: type: DROPTABLE
 +POSTHOOK: query: drop table intermediate
 +POSTHOOK: type: DROPTABLE
 +PREHOOK: query: create table intermediate(key int) partitioned by (p int) stored as orc
 +PREHOOK: type: CREATETABLE
 +PREHOOK: Output: database:default
 +PREHOOK: Output: default@intermediate
 +POSTHOOK: query: create table intermediate(key int) partitioned by (p int) stored as orc
 +POSTHOOK: type: CREATETABLE
 +POSTHOOK: Output: database:default
 +POSTHOOK: Output: default@intermediate
 +PREHOOK: query: insert into table intermediate partition(p='455') select distinct key from src where key >= 0 order by key desc limit 2
 +PREHOOK: type: QUERY
 +PREHOOK: Input: default@src
 +PREHOOK: Output: default@intermediate@p=455
 +POSTHOOK: query: insert into table intermediate partition(p='455') select distinct key from src where key >= 0 order by key desc limit 2
 +POSTHOOK: type: QUERY
 +POSTHOOK: Input: default@src
 +POSTHOOK: Output: default@intermediate@p=455
 +POSTHOOK: Lineage: intermediate PARTITION(p=455).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
 +PREHOOK: query: insert into table intermediate partition(p='456') select distinct key from src where key is not null order by key asc limit 2
 +PREHOOK: type: QUERY
 +PREHOOK: Input: default@src
 +PREHOOK: Output: default@intermediate@p=456
 +POSTHOOK: query: insert into table intermediate partition(p='456') select distinct key from src where key is not null order by key asc limit 2
 +POSTHOOK: type: QUERY
 +POSTHOOK: Input: default@src
 +POSTHOOK: Output: default@intermediate@p=456
 +POSTHOOK: Lineage: intermediate PARTITION(p=456).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
 +PREHOOK: query: insert into table intermediate partition(p='457') select distinct key from src where key >= 100 order by key asc limit 2
 +PREHOOK: type: QUERY
 +PREHOOK: Input: default@src
 +PREHOOK: Output: default@intermediate@p=457
 +POSTHOOK: query: insert into table intermediate partition(p='457') select distinct key from src where key >= 100 order by key asc limit 2
 +POSTHOOK: type: QUERY
 +POSTHOOK: Input: default@src
 +POSTHOOK: Output: default@intermediate@p=457
 +POSTHOOK: Lineage: intermediate PARTITION(p=457).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
 +PREHOOK: query: drop table part_mm
 +PREHOOK: type: DROPTABLE
 +POSTHOOK: query: drop table part_mm
 +POSTHOOK: type: DROPTABLE
 +PREHOOK: query: create table part_mm(key int) partitioned by (key_mm int) stored as orc tblproperties ("transactional"="true", "transactional_properties"="insert_only")
 +PREHOOK: type: CREATETABLE
 +PREHOOK: Output: database:default
 +PREHOOK: Output: default@part_mm
 +POSTHOOK: query: create table part_mm(key int) partitioned by (key_mm int) stored as orc tblproperties ("transactional"="true", "transactional_properties"="insert_only")
 +POSTHOOK: type: CREATETABLE
 +POSTHOOK: Output: database:default
 +POSTHOOK: Output: default@part_mm
 +PREHOOK: query: explain insert into table part_mm partition(key_mm=455) select key from intermediate
 +PREHOOK: type: QUERY
 +POSTHOOK: query: explain insert into table part_mm partition(key_mm=455) select key from intermediate
 +POSTHOOK: type: QUERY
 +STAGE DEPENDENCIES:
 +  Stage-1 is a root stage
 +  Stage-7 depends on stages: Stage-1 , consists of Stage-4, Stage-3, Stage-5
 +  Stage-4
 +  Stage-0 depends on stages: Stage-4, Stage-3, Stage-6
 +  Stage-2 depends on stages: Stage-0
 +  Stage-3
 +  Stage-5
 +  Stage-6 depends on stages: Stage-5
 +
 +STAGE PLANS:
 +  Stage: Stage-1
 +    Map Reduce
 +      Map Operator Tree:
 +          TableScan
 +            alias: intermediate
 +            Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
 +            Select Operator
 +              expressions: key (type: int)
 +              outputColumnNames: _col0
 +              Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
 +              File Output Operator
 +                compressed: false
 +                Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
 +                table:
 +                    input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
 +                    output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
 +                    serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
 +                    name: default.part_mm
 +
 +  Stage: Stage-7
 +    Conditional Operator
 +
 +  Stage: Stage-4
 +    Move Operator
 +      files:
 +          hdfs directory: true
 +#### A masked pattern was here ####
 +
 +  Stage: Stage-0
 +    Move Operator
 +      tables:
 +          partition:
 +            key_mm 455
 +          replace: false
 +          table:
 +              input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
 +              output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
 +              serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
 +              name: default.part_mm
 +          micromanaged table: true
 +
 +  Stage: Stage-2
 +    Stats-Aggr Operator
 +
 +  Stage: Stage-3
 +    Merge File Operator
 +      Map Operator Tree:
 +          ORC File Merge Operator
 +      merge level: stripe
 +      input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
 +
 +  Stage: Stage-5
 +    Merge File Operator
 +      Map Operator Tree:
 +          ORC File Merge Operator
 +      merge level: stripe
 +      input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
 +
 +  Stage: Stage-6
 +    Move Operator
 +      files:
 +          hdfs directory: true
 +#### A masked pattern was here ####
 +
 +PREHOOK: query: insert into table part_mm partition(key_mm=455) select key from intermediate
 +PREHOOK: type: QUERY
 +PREHOOK: Input: default@intermediate
 +PREHOOK: Input: default@intermediate@p=455
 +PREHOOK: Input: default@intermediate@p=456
 +PREHOOK: Input: default@intermediate@p=457
 +PREHOOK: Output: default@part_mm@key_mm=455
 +POSTHOOK: query: insert into table part_mm partition(key_mm=455) select key from intermediate
 +POSTHOOK: type: QUERY
 +POSTHOOK: Input: default@intermediate
 +POSTHOOK: Input: default@intermediate@p=455
 +POSTHOOK: Input: default@intermediate@p=456
 +POSTHOOK: Input: default@intermediate@p=457
 +POSTHOOK: Output: default@part_mm@key_mm=455
 +POSTHOOK: Lineage: part_mm PARTITION(key_mm=455).key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +PREHOOK: query: insert into table part_mm partition(key_mm=456) select key from intermediate
 +PREHOOK: type: QUERY
 +PREHOOK: Input: default@intermediate
 +PREHOOK: Input: default@intermediate@p=455
 +PREHOOK: Input: default@intermediate@p=456
 +PREHOOK: Input: default@intermediate@p=457
 +PREHOOK: Output: default@part_mm@key_mm=456
 +POSTHOOK: query: insert into table part_mm partition(key_mm=456) select key from intermediate
 +POSTHOOK: type: QUERY
 +POSTHOOK: Input: default@intermediate
 +POSTHOOK: Input: default@intermediate@p=455
 +POSTHOOK: Input: default@intermediate@p=456
 +POSTHOOK: Input: default@intermediate@p=457
 +POSTHOOK: Output: default@part_mm@key_mm=456
 +POSTHOOK: Lineage: part_mm PARTITION(key_mm=456).key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +PREHOOK: query: insert into table part_mm partition(key_mm=455) select key from intermediate
 +PREHOOK: type: QUERY
 +PREHOOK: Input: default@intermediate
 +PREHOOK: Input: default@intermediate@p=455
 +PREHOOK: Input: default@intermediate@p=456
 +PREHOOK: Input: default@intermediate@p=457
 +PREHOOK: Output: default@part_mm@key_mm=455
 +POSTHOOK: query: insert into table part_mm partition(key_mm=455) select key from intermediate
 +POSTHOOK: type: QUERY
 +POSTHOOK: Input: default@intermediate
 +POSTHOOK: Input: default@intermediate@p=455
 +POSTHOOK: Input: default@intermediate@p=456
 +POSTHOOK: Input: default@intermediate@p=457
 +POSTHOOK: Output: default@part_mm@key_mm=455
 +POSTHOOK: Lineage: part_mm PARTITION(key_mm=455).key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +PREHOOK: query: select * from part_mm order by key, key_mm
 +PREHOOK: type: QUERY
 +PREHOOK: Input: default@part_mm
 +PREHOOK: Input: default@part_mm@key_mm=455
 +PREHOOK: Input: default@part_mm@key_mm=456
 +#### A masked pattern was here ####
 +POSTHOOK: query: select * from part_mm order by key, key_mm
 +POSTHOOK: type: QUERY
 +POSTHOOK: Input: default@part_mm
 +POSTHOOK: Input: default@part_mm@key_mm=455
 +POSTHOOK: Input: default@part_mm@key_mm=456
 +#### A masked pattern was here ####
 +0	455
 +0	455
 +0	456
 +10	455
 +10	455
 +10	456
 +97	455
 +97	455
 +97	456
 +98	455
 +98	455
 +98	456
 +100	455
 +100	455
 +100	456
 +103	455
 +103	455
 +103	456
 +PREHOOK: query: select * from part_mm order by key, key_mm
 +PREHOOK: type: QUERY
 +PREHOOK: Input: default@part_mm
 +PREHOOK: Input: default@part_mm@key_mm=455
 +PREHOOK: Input: default@part_mm@key_mm=456
 +#### A masked pattern was here ####
 +POSTHOOK: query: select * from part_mm order by key, key_mm
 +POSTHOOK: type: QUERY
 +POSTHOOK: Input: default@part_mm
 +POSTHOOK: Input: default@part_mm@key_mm=455
 +POSTHOOK: Input: default@part_mm@key_mm=456
 +#### A masked pattern was here ####
 +0	455
 +0	455
 +0	456
 +10	455
 +10	455
 +10	456
 +97	455
 +97	455
 +97	456
 +98	455
 +98	455
 +98	456
 +100	455
 +100	455
 +100	456
 +103	455
 +103	455
 +103	456
 +PREHOOK: query: truncate table part_mm
 +PREHOOK: type: TRUNCATETABLE
 +PREHOOK: Output: default@part_mm@key_mm=455
 +PREHOOK: Output: default@part_mm@key_mm=456
 +POSTHOOK: query: truncate table part_mm
 +POSTHOOK: type: TRUNCATETABLE
 +POSTHOOK: Output: default@part_mm@key_mm=455
 +POSTHOOK: Output: default@part_mm@key_mm=456
 +PREHOOK: query: select * from part_mm order by key, key_mm
 +PREHOOK: type: QUERY
 +PREHOOK: Input: default@part_mm
 +PREHOOK: Input: default@part_mm@key_mm=455
 +PREHOOK: Input: default@part_mm@key_mm=456
 +#### A masked pattern was here ####
 +POSTHOOK: query: select * from part_mm order by key, key_mm
 +POSTHOOK: type: QUERY
 +POSTHOOK: Input: default@part_mm
 +POSTHOOK: Input: default@part_mm@key_mm=455
 +POSTHOOK: Input: default@part_mm@key_mm=456
 +#### A masked pattern was here ####
 +PREHOOK: query: drop table part_mm
 +PREHOOK: type: DROPTABLE
 +PREHOOK: Input: default@part_mm
 +PREHOOK: Output: default@part_mm
 +POSTHOOK: query: drop table part_mm
 +POSTHOOK: type: DROPTABLE
 +POSTHOOK: Input: default@part_mm
 +POSTHOOK: Output: default@part_mm
 +PREHOOK: query: drop table simple_mm
 +PREHOOK: type: DROPTABLE
 +POSTHOOK: query: drop table simple_mm
 +POSTHOOK: type: DROPTABLE
 +PREHOOK: query: create table simple_mm(key int) stored as orc tblproperties ("transactional"="true", "transactional_properties"="insert_only")
 +PREHOOK: type: CREATETABLE
 +PREHOOK: Output: database:default
 +PREHOOK: Output: default@simple_mm
 +POSTHOOK: query: create table simple_mm(key int) stored as orc tblproperties ("transactional"="true", "transactional_properties"="insert_only")
 +POSTHOOK: type: CREATETABLE
 +POSTHOOK: Output: database:default
 +POSTHOOK: Output: default@simple_mm
 +PREHOOK: query: insert into table simple_mm select key from intermediate
 +PREHOOK: type: QUERY
 +PREHOOK: Input: default@intermediate
 +PREHOOK: Input: default@intermediate@p=455
 +PREHOOK: Input: default@intermediate@p=456
 +PREHOOK: Input: default@intermediate@p=457
 +PREHOOK: Output: default@simple_mm
 +POSTHOOK: query: insert into table simple_mm select key from intermediate
 +POSTHOOK: type: QUERY
 +POSTHOOK: Input: default@intermediate
 +POSTHOOK: Input: default@intermediate@p=455
 +POSTHOOK: Input: default@intermediate@p=456
 +POSTHOOK: Input: default@intermediate@p=457
 +POSTHOOK: Output: default@simple_mm
 +POSTHOOK: Lineage: simple_mm.key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +PREHOOK: query: insert overwrite table simple_mm select key from intermediate
 +PREHOOK: type: QUERY
 +PREHOOK: Input: default@intermediate
 +PREHOOK: Input: default@intermediate@p=455
 +PREHOOK: Input: default@intermediate@p=456
 +PREHOOK: Input: default@intermediate@p=457
 +PREHOOK: Output: default@simple_mm
 +POSTHOOK: query: insert overwrite table simple_mm select key from intermediate
 +POSTHOOK: type: QUERY
 +POSTHOOK: Input: default@intermediate
 +POSTHOOK: Input: default@intermediate@p=455
 +POSTHOOK: Input: default@intermediate@p=456
 +POSTHOOK: Input: default@intermediate@p=457
 +POSTHOOK: Output: default@simple_mm
 +POSTHOOK: Lineage: simple_mm.key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +PREHOOK: query: select * from simple_mm order by key
 +PREHOOK: type: QUERY
 +PREHOOK: Input: default@simple_mm
 +#### A masked pattern was here ####
 +POSTHOOK: query: select * from simple_mm order by key
 +POSTHOOK: type: QUERY
 +POSTHOOK: Input: default@simple_mm
 +#### A masked pattern was here ####
 +0
 +10
 +97
 +98
 +100
 +103
 +PREHOOK: query: insert into table simple_mm select key from intermediate
 +PREHOOK: type: QUERY
 +PREHOOK: Input: default@intermediate
 +PREHOOK: Input: default@intermediate@p=455
 +PREHOOK: Input: default@intermediate@p=456
 +PREHOOK: Input: default@intermediate@p=457
 +PREHOOK: Output: default@simple_mm
 +POSTHOOK: query: insert into table simple_mm select key from intermediate
 +POSTHOOK: type: QUERY
 +POSTHOOK: Input: default@intermediate
 +POSTHOOK: Input: default@intermediate@p=455
 +POSTHOOK: Input: default@intermediate@p=456
 +POSTHOOK: Input: default@intermediate@p=457
 +POSTHOOK: Output: default@simple_mm
 +POSTHOOK: Lineage: simple_mm.key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +PREHOOK: query: select * from simple_mm order by key
 +PREHOOK: type: QUERY
 +PREHOOK: Input: default@simple_mm
 +#### A masked pattern was here ####
 +POSTHOOK: query: select * from simple_mm order by key
 +POSTHOOK: type: QUERY
 +POSTHOOK: Input: default@simple_mm
 +#### A masked pattern was here ####
 +0
 +0
 +10
 +10
 +97
 +97
 +98
 +98
 +100
 +100
 +103
 +103
 +PREHOOK: query: truncate table simple_mm
 +PREHOOK: type: TRUNCATETABLE
 +PREHOOK: Output: default@simple_mm
 +POSTHOOK: query: truncate table simple_mm
 +POSTHOOK: type: TRUNCATETABLE
 +POSTHOOK: Output: default@simple_mm
 +PREHOOK: query: select * from simple_mm
 +PREHOOK: type: QUERY
 +PREHOOK: Input: default@simple_mm
 +#### A masked pattern was here ####
 +POSTHOOK: query: select * from simple_mm
 +POSTHOOK: type: QUERY
 +POSTHOOK: Input: default@simple_mm
 +#### A masked pattern was here ####
 +PREHOOK: query: drop table simple_mm
 +PREHOOK: type: DROPTABLE
 +PREHOOK: Input: default@simple_mm
 +PREHOOK: Output: default@simple_mm
 +POSTHOOK: query: drop table simple_mm
 +POSTHOOK: type: DROPTABLE
 +POSTHOOK: Input: default@simple_mm
 +POSTHOOK: Output: default@simple_mm
 +PREHOOK: query: drop table dp_mm
 +PREHOOK: type: DROPTABLE
 +POSTHOOK: query: drop table dp_mm
 +POSTHOOK: type: DROPTABLE
 +PREHOOK: query: create table dp_mm (key int) partitioned by (key1 string, key2 int) stored as orc
 +  tblproperties ("transactional"="true", "transactional_properties"="insert_only")
 +PREHOOK: type: CREATETABLE
 +PREHOOK: Output: database:default
 +PREHOOK: Output: default@dp_mm
 +POSTHOOK: query: create table dp_mm (key int) partitioned by (key1 string, key2 int) stored as orc
 +  tblproperties ("transactional"="true", "transactional_properties"="insert_only")
 +POSTHOOK: type: CREATETABLE
 +POSTHOOK: Output: database:default
 +POSTHOOK: Output: default@dp_mm
 +PREHOOK: query: insert into table dp_mm partition (key1='123', key2) select key, key from intermediate
 +PREHOOK: type: QUERY
 +PREHOOK: Input: default@intermediate
 +PREHOOK: Input: default@intermediate@p=455
 +PREHOOK: Input: default@intermediate@p=456
 +PREHOOK: Input: default@intermediate@p=457
 +PREHOOK: Output: default@dp_mm@key1=123
 +POSTHOOK: query: insert into table dp_mm partition (key1='123', key2) select key, key from intermediate
 +POSTHOOK: type: QUERY
 +POSTHOOK: Input: default@intermediate
 +POSTHOOK: Input: default@intermediate@p=455
 +POSTHOOK: Input: default@intermediate@p=456
 +POSTHOOK: Input: default@intermediate@p=457
 +POSTHOOK: Output: default@dp_mm@key1=123/key2=0
 +POSTHOOK: Output: default@dp_mm@key1=123/key2=10
 +POSTHOOK: Output: default@dp_mm@key1=123/key2=100
 +POSTHOOK: Output: default@dp_mm@key1=123/key2=103
 +POSTHOOK: Output: default@dp_mm@key1=123/key2=97
 +POSTHOOK: Output: default@dp_mm@key1=123/key2=98
 +POSTHOOK: Lineage: dp_mm PARTITION(key1=123,key2=0).key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +POSTHOOK: Lineage: dp_mm PARTITION(key1=123,key2=100).key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +POSTHOOK: Lineage: dp_mm PARTITION(key1=123,key2=103).key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +POSTHOOK: Lineage: dp_mm PARTITION(key1=123,key2=10).key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +POSTHOOK: Lineage: dp_mm PARTITION(key1=123,key2=97).key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +POSTHOOK: Lineage: dp_mm PARTITION(key1=123,key2=98).key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +PREHOOK: query: select * from dp_mm order by key
 +PREHOOK: type: QUERY
 +PREHOOK: Input: default@dp_mm
 +PREHOOK: Input: default@dp_mm@key1=123/key2=0
 +PREHOOK: Input: default@dp_mm@key1=123/key2=10
 +PREHOOK: Input: default@dp_mm@key1=123/key2=100
 +PREHOOK: Input: default@dp_mm@key1=123/key2=103
 +PREHOOK: Input: default@dp_mm@key1=123/key2=97
 +PREHOOK: Input: default@dp_mm@key1=123/key2=98
 +#### A masked pattern was here ####
 +POSTHOOK: query: select * from dp_mm order by key
 +POSTHOOK: type: QUERY
 +POSTHOOK: Input: default@dp_mm
 +POSTHOOK: Input: default@dp_mm@key1=123/key2=0
 +POSTHOOK: Input: default@dp_mm@key1=123/key2=10
 +POSTHOOK: Input: default@dp_mm@key1=123/key2=100
 +POSTHOOK: Input: default@dp_mm@key1=123/key2=103
 +POSTHOOK: Input: default@dp_mm@key1=123/key2=97
 +POSTHOOK: Input: default@dp_mm@key1=123/key2=98
 +#### A masked pattern was here ####
 +0	123	0
 +10	123	10
 +97	123	97
 +98	123	98
 +100	123	100
 +103	123	103
 +PREHOOK: query: drop table dp_mm
 +PREHOOK: type: DROPTABLE
 +PREHOOK: Input: default@dp_mm
 +PREHOOK: Output: default@dp_mm
 +POSTHOOK: query: drop table dp_mm
 +POSTHOOK: type: DROPTABLE
 +POSTHOOK: Input: default@dp_mm
 +POSTHOOK: Output: default@dp_mm
 +PREHOOK: query: create table union_mm(id int)  tblproperties ("transactional"="true", "transactional_properties"="insert_only")
 +PREHOOK: type: CREATETABLE
 +PREHOOK: Output: database:default
 +PREHOOK: Output: default@union_mm
 +POSTHOOK: query: create table union_mm(id int)  tblproperties ("transactional"="true", "transactional_properties"="insert_only")
 +POSTHOOK: type: CREATETABLE
 +POSTHOOK: Output: database:default
 +POSTHOOK: Output: default@union_mm
 +PREHOOK: query: insert into table union_mm 
 +select temps.p from (
 +select key as p from intermediate 
 +union all 
 +select key + 1 as p from intermediate ) temps
 +PREHOOK: type: QUERY
 +PREHOOK: Input: default@intermediate
 +PREHOOK: Input: default@intermediate@p=455
 +PREHOOK: Input: default@intermediate@p=456
 +PREHOOK: Input: default@intermediate@p=457
 +PREHOOK: Output: default@union_mm
 +POSTHOOK: query: insert into table union_mm 
 +select temps.p from (
 +select key as p from intermediate 
 +union all 
 +select key + 1 as p from intermediate ) temps
 +POSTHOOK: type: QUERY
 +POSTHOOK: Input: default@intermediate
 +POSTHOOK: Input: default@intermediate@p=455
 +POSTHOOK: Input: default@intermediate@p=456
 +POSTHOOK: Input: default@intermediate@p=457
 +POSTHOOK: Output: default@union_mm
 +POSTHOOK: Lineage: union_mm.id EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +PREHOOK: query: select * from union_mm order by id
 +PREHOOK: type: QUERY
 +PREHOOK: Input: default@union_mm
 +#### A masked pattern was here ####
 +POSTHOOK: query: select * from union_mm order by id
 +POSTHOOK: type: QUERY
 +POSTHOOK: Input: default@union_mm
 +#### A masked pattern was here ####
 +0
 +1
 +10
 +11
 +97
 +98
 +98
 +99
 +100
 +101
 +103
 +104
 +PREHOOK: query: insert into table union_mm 
 +select p from
 +(
 +select key + 1 as p from intermediate
 +union all
 +select key from intermediate
 +) tab group by p
 +union all
 +select key + 2 as p from intermediate
 +PREHOOK: type: QUERY
 +PREHOOK: Input: default@intermediate
 +PREHOOK: Input: default@intermediate@p=455
 +PREHOOK: Input: default@intermediate@p=456
 +PREHOOK: Input: default@intermediate@p=457
 +PREHOOK: Output: default@union_mm
 +POSTHOOK: query: insert into table union_mm 
 +select p from
 +(
 +select key + 1 as p from intermediate
 +union all
 +select key from intermediate
 +) tab group by p
 +union all
 +select key + 2 as p from intermediate
 +POSTHOOK: type: QUERY
 +POSTHOOK: Input: default@intermediate
 +POSTHOOK: Input: default@intermediate@p=455
 +POSTHOOK: Input: default@intermediate@p=456
 +POSTHOOK: Input: default@intermediate@p=457
 +POSTHOOK: Output: default@union_mm
 +POSTHOOK: Lineage: union_mm.id EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +PREHOOK: query: select * from union_mm order by id
 +PREHOOK: type: QUERY
 +PREHOOK: Input: default@union_mm
 +#### A masked pattern was here ####
 +POSTHOOK: query: select * from union_mm order by id
 +POSTHOOK: type: QUERY
 +POSTHOOK: Input: default@union_mm
 +#### A masked pattern was here ####
 +0
 +0
 +1
 +1
 +2
 +10
 +10
 +11
 +11
 +12
 +97
 +97
 +98
 +98
 +98
 +99
 +99
 +99
 +100
 +100
 +100
 +101
 +101
 +102
 +103
 +103
 +104
 +104
 +105
 +PREHOOK: query: insert into table union_mm
 +SELECT p FROM
 +(
 +  SELECT key + 1 as p FROM intermediate
 +  UNION ALL
 +  SELECT key as p FROM ( 
 +    SELECT distinct key FROM (
 +      SELECT key FROM (
 +        SELECT key + 2 as key FROM intermediate
 +        UNION ALL
 +        SELECT key FROM intermediate
 +      )t1 
 +    group by key)t2
 +  )t3
 +)t4
 +group by p
 +PREHOOK: type: QUERY
 +PREHOOK: Input: default@intermediate
 +PREHOOK: Input: default@intermediate@p=455
 +PREHOOK: Input: default@intermediate@p=456
 +PREHOOK: Input: default@intermediate@p=457
 +PREHOOK: Output: default@union_mm
 +POSTHOOK: query: insert into table union_mm
 +SELECT p FROM
 +(
 +  SELECT key + 1 as p FROM intermediate
 +  UNION ALL
 +  SELECT key as p FROM ( 
 +    SELECT distinct key FROM (
 +      SELECT key FROM (
 +        SELECT key + 2 as key FROM intermediate
 +        UNION ALL
 +        SELECT key FROM intermediate
 +      )t1 
 +    group by key)t2
 +  )t3
 +)t4
 +group by p
 +POSTHOOK: type: QUERY
 +POSTHOOK: Input: default@intermediate
 +POSTHOOK: Input: default@intermediate@p=455
 +POSTHOOK: Input: default@intermediate@p=456
 +POSTHOOK: Input: default@intermediate@p=457
 +POSTHOOK: Output: default@union_mm
 +POSTHOOK: Lineage: union_mm.id EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +PREHOOK: query: select * from union_mm order by id
 +PREHOOK: type: QUERY
 +PREHOOK: Input: default@union_mm
 +#### A masked pattern was here ####
 +POSTHOOK: query: select * from union_mm order by id
 +POSTHOOK: type: QUERY
 +POSTHOOK: Input: default@union_mm
 +#### A masked pattern was here ####
 +0
 +0
 +0
 +1
 +1
 +1
 +2
 +2
 +10
 +10
 +10
 +11
 +11
 +11
 +12
 +12
 +97
 +97
 +97
 +98
 +98
 +98
 +98
 +99
 +99
 +99
 +99
 +100
 +100
 +100
 +100
 +101
 +101
 +101
 +102
 +102
 +103
 +103
 +103
 +104
 +104
 +104
 +105
 +105
 +PREHOOK: query: drop table union_mm
 +PREHOOK: type: DROPTABLE
 +PREHOOK: Input: default@union_mm
 +PREHOOK: Output: default@union_mm
 +POSTHOOK: query: drop table union_mm
 +POSTHOOK: type: DROPTABLE
 +POSTHOOK: Input: default@union_mm
 +POSTHOOK: Output: default@union_mm
 +PREHOOK: query: create table partunion_mm(id int) partitioned by (key int) tblproperties ("transactional"="true", "transactional_properties"="insert_only")
 +PREHOOK: type: CREATETABLE
 +PREHOOK: Output: database:default
 +PREHOOK: Output: default@partunion_mm
 +POSTHOOK: query: create table partunion_mm(id int) partitioned by (key int) tblproperties ("transactional"="true", "transactional_properties"="insert_only")
 +POSTHOOK: type: CREATETABLE
 +POSTHOOK: Output: database:default
 +POSTHOOK: Output: default@partunion_mm
 +PREHOOK: query: insert into table partunion_mm partition(key)
 +select temps.* from (
 +select key as p, key from intermediate 
 +union all 
 +select key + 1 as p, key + 1 from intermediate ) temps
 +PREHOOK: type: QUERY
 +PREHOOK: Input: default@intermediate
 +PREHOOK: Input: default@intermediate@p=455
 +PREHOOK: Input: default@intermediate@p=456
 +PREHOOK: Input: default@intermediate@p=457
 +PREHOOK: Output: default@partunion_mm
 +POSTHOOK: query: insert into table partunion_mm partition(key)
 +select temps.* from (
 +select key as p, key from intermediate 
 +union all 
 +select key + 1 as p, key + 1 from intermediate ) temps
 +POSTHOOK: type: QUERY
 +POSTHOOK: Input: default@intermediate
 +POSTHOOK: Input: default@intermediate@p=455
 +POSTHOOK: Input: default@intermediate@p=456
 +POSTHOOK: Input: default@intermediate@p=457
 +POSTHOOK: Output: default@partunion_mm@key=0
 +POSTHOOK: Output: default@partunion_mm@key=1
 +POSTHOOK: Output: default@partunion_mm@key=10
 +POSTHOOK: Output: default@partunion_mm@key=100
 +POSTHOOK: Output: default@partunion_mm@key=101
 +POSTHOOK: Output: default@partunion_mm@key=103
 +POSTHOOK: Output: default@partunion_mm@key=104
 +POSTHOOK: Output: default@partunion_mm@key=11
 +POSTHOOK: Output: default@partunion_mm@key=97
 +POSTHOOK: Output: default@partunion_mm@key=98
 +POSTHOOK: Output: default@partunion_mm@key=99
 +POSTHOOK: Lineage: partunion_mm PARTITION(key=0).id EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +POSTHOOK: Lineage: partunion_mm PARTITION(key=100).id EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +POSTHOOK: Lineage: partunion_mm PARTITION(key=101).id EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +POSTHOOK: Lineage: partunion_mm PARTITION(key=103).id EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +POSTHOOK: Lineage: partunion_mm PARTITION(key=104).id EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +POSTHOOK: Lineage: partunion_mm PARTITION(key=10).id EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +POSTHOOK: Lineage: partunion_mm PARTITION(key=11).id EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +POSTHOOK: Lineage: partunion_mm PARTITION(key=1).id EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +POSTHOOK: Lineage: partunion_mm PARTITION(key=97).id EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +POSTHOOK: Lineage: partunion_mm PARTITION(key=98).id EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +POSTHOOK: Lineage: partunion_mm PARTITION(key=99).id EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +PREHOOK: query: select * from partunion_mm order by id
 +PREHOOK: type: QUERY
 +PREHOOK: Input: default@partunion_mm
 +PREHOOK: Input: default@partunion_mm@key=0
 +PREHOOK: Input: default@partunion_mm@key=1
 +PREHOOK: Input: default@partunion_mm@key=10
 +PREHOOK: Input: default@partunion_mm@key=100
 +PREHOOK: Input: default@partunion_mm@key=101
 +PREHOOK: Input: default@partunion_mm@key=103
 +PREHOOK: Input: default@partunion_mm@key=104
 +PREHOOK: Input: default@partunion_mm@key=11
 +PREHOOK: Input: default@partunion_mm@key=97
 +PREHOOK: Input: default@partunion_mm@key=98
 +PREHOOK: Input: default@partunion_mm@key=99
 +#### A masked pattern was here ####
 +POSTHOOK: query: select * from partunion_mm order by id
 +POSTHOOK: type: QUERY
 +POSTHOOK: Input: default@partunion_mm
 +POSTHOOK: Input: default@partunion_mm@key=0
 +POSTHOOK: Input: default@partunion_mm@key=1
 +POSTHOOK: Input: default@partunion_mm@key=10
 +POSTHOOK: Input: default@partunion_mm@key=100
 +POSTHOOK: Input: default@partunion_mm@key=101
 +POSTHOOK: Input: default@partunion_mm@key=103
 +POSTHOOK: Input: default@partunion_mm@key=104
 +POSTHOOK: Input: default@partunion_mm@key=11
 +POSTHOOK: Input: default@partunion_mm@key=97
 +POSTHOOK: Input: default@partunion_mm@key=98
 +POSTHOOK: Input: default@partunion_mm@key=99
 +#### A masked pattern was here ####
 +0	0
 +1	1
 +10	10
 +11	11
 +97	97
 +98	98
 +98	98
 +99	99
 +100	100
 +101	101
 +103	103
 +104	104
 +PREHOOK: query: drop table partunion_mm
 +PREHOOK: type: DROPTABLE
 +PREHOOK: Input: default@partunion_mm
 +PREHOOK: Output: default@partunion_mm
 +POSTHOOK: query: drop table partunion_mm
 +POSTHOOK: type: DROPTABLE
 +POSTHOOK: Input: default@partunion_mm
 +POSTHOOK: Output: default@partunion_mm
 +PREHOOK: query: create table skew_mm(k1 int, k2 int, k4 int) skewed by (k1, k4) on ((0,0),(1,1),(2,2),(3,3))
 + stored as directories tblproperties ("transactional"="true", "transactional_properties"="insert_only")
 +PREHOOK: type: CREATETABLE
 +PREHOOK: Output: database:default
 +PREHOOK: Output: default@skew_mm
 +POSTHOOK: query: create table skew_mm(k1 int, k2 int, k4 int) skewed by (k1, k4) on ((0,0),(1,1),(2,2),(3,3))
 + stored as directories tblproperties ("transactional"="true", "transactional_properties"="insert_only")
 +POSTHOOK: type: CREATETABLE
 +POSTHOOK: Output: database:default
 +POSTHOOK: Output: default@skew_mm
 +PREHOOK: query: insert into table skew_mm 
 +select key, key, key from intermediate
 +PREHOOK: type: QUERY
 +PREHOOK: Input: default@intermediate
 +PREHOOK: Input: default@intermediate@p=455
 +PREHOOK: Input: default@intermediate@p=456
 +PREHOOK: Input: default@intermediate@p=457
 +PREHOOK: Output: default@skew_mm
 +POSTHOOK: query: insert into table skew_mm 
 +select key, key, key from intermediate
 +POSTHOOK: type: QUERY
 +POSTHOOK: Input: default@intermediate
 +POSTHOOK: Input: default@intermediate@p=455
 +POSTHOOK: Input: default@intermediate@p=456
 +POSTHOOK: Input: default@intermediate@p=457
 +POSTHOOK: Output: default@skew_mm
 +POSTHOOK: Lineage: skew_mm.k1 SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +POSTHOOK: Lineage: skew_mm.k2 SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +POSTHOOK: Lineage: skew_mm.k4 SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +PREHOOK: query: select * from skew_mm order by k2, k1, k4
 +PREHOOK: type: QUERY
 +PREHOOK: Input: default@skew_mm
 +#### A masked pattern was here ####
 +POSTHOOK: query: select * from skew_mm order by k2, k1, k4
 +POSTHOOK: type: QUERY
 +POSTHOOK: Input: default@skew_mm
 +#### A masked pattern was here ####
 +0	0	0
 +10	10	10
 +97	97	97
 +98	98	98
 +100	100	100
 +103	103	103
 +PREHOOK: query: drop table skew_mm
 +PREHOOK: type: DROPTABLE
 +PREHOOK: Input: default@skew_mm
 +PREHOOK: Output: default@skew_mm
 +POSTHOOK: query: drop table skew_mm
 +POSTHOOK: type: DROPTABLE
 +POSTHOOK: Input: default@skew_mm
 +POSTHOOK: Output: default@skew_mm
 +PREHOOK: query: create table skew_dp_union_mm(k1 int, k2 int, k4 int) partitioned by (k3 int) 
 +skewed by (k1, k4) on ((0,0),(1,1),(2,2),(3,3)) stored as directories tblproperties ("transactional"="true", "transactional_properties"="insert_only")
 +PREHOOK: type: CREATETABLE
 +PREHOOK: Output: database:default
 +PREHOOK: Output: default@skew_dp_union_mm
 +POSTHOOK: query: create table skew_dp_union_mm(k1 int, k2 int, k4 int) partitioned by (k3 int) 
 +skewed by (k1, k4) on ((0,0),(1,1),(2,2),(3,3)) stored as directories tblproperties ("transactional"="true", "transactional_properties"="insert_only")
 +POSTHOOK: type: CREATETABLE
 +POSTHOOK: Output: database:default
 +POSTHOOK: Output: default@skew_dp_union_mm
 +PREHOOK: query: insert into table skew_dp_union_mm partition (k3)
 +select key as i, key as j, key as k, key as l from intermediate
 +union all 
 +select key +1 as i, key +2 as j, key +3 as k, key +4 as l from intermediate
 +PREHOOK: type: QUERY
 +PREHOOK: Input: default@intermediate
 +PREHOOK: Input: default@intermediate@p=455
 +PREHOOK: Input: default@intermediate@p=456
 +PREHOOK: Input: default@intermediate@p=457
 +PREHOOK: Output: default@skew_dp_union_mm
 +POSTHOOK: query: insert into table skew_dp_union_mm partition (k3)
 +select key as i, key as j, key as k, key as l from intermediate
 +union all 
 +select key +1 as i, key +2 as j, key +3 as k, key +4 as l from intermediate
 +POSTHOOK: type: QUERY
 +POSTHOOK: Input: default@intermediate
 +POSTHOOK: Input: default@intermediate@p=455
 +POSTHOOK: Input: default@intermediate@p=456
 +POSTHOOK: Input: default@intermediate@p=457
 +POSTHOOK: Output: default@skew_dp_union_mm@k3=0
 +POSTHOOK: Output: default@skew_dp_union_mm@k3=10
 +POSTHOOK: Output: default@skew_dp_union_mm@k3=100
 +POSTHOOK: Output: default@skew_dp_union_mm@k3=101
 +POSTHOOK: Output: default@skew_dp_union_mm@k3=102
 +POSTHOOK: Output: default@skew_dp_union_mm@k3=103
 +POSTHOOK: Output: default@skew_dp_union_mm@k3=104
 +POSTHOOK: Output: default@skew_dp_union_mm@k3=107
 +POSTHOOK: Output: default@skew_dp_union_mm@k3=14
 +POSTHOOK: Output: default@skew_dp_union_mm@k3=4
 +POSTHOOK: Output: default@skew_dp_union_mm@k3=97
 +POSTHOOK: Output: default@skew_dp_union_mm@k3=98
 +POSTHOOK: Lineage: skew_dp_union_mm PARTITION(k3=0).k1 EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +POSTHOOK: Lineage: skew_dp_union_mm PARTITION(k3=0).k2 EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +POSTHOOK: Lineage: skew_dp_union_mm PARTITION(k3=0).k4 EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +POSTHOOK: Lineage: skew_dp_union_mm PARTITION(k3=100).k1 EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +POSTHOOK: Lineage: skew_dp_union_mm PARTITION(k3=100).k2 EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +POSTHOOK: Lineage: skew_dp_union_mm PARTITION(k3=100).k4 EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +POSTHOOK: Lineage: skew_dp_union_mm PARTITION(k3=101).k1 EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +POSTHOOK: Lineage: skew_dp_union_mm PARTITION(k3=101).k2 EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +POSTHOOK: Lineage: skew_dp_union_mm PARTITION(k3=101).k4 EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +POSTHOOK: Lineage: skew_dp_union_mm PARTITION(k3=102).k1 EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +POSTHOOK: Lineage: skew_dp_union_mm PARTITION(k3=102).k2 EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +POSTHOOK: Lineage: skew_dp_union_mm PARTITION(k3=102).k4 EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +POSTHOOK: Lineage: skew_dp_union_mm PARTITION(k3=103).k1 EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +POSTHOOK: Lineage: skew_dp_union_mm PARTITION(k3=103).k2 EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +POSTHOOK: Lineage: skew_dp_union_mm PARTITION(k3=103).k4 EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +POSTHOOK: Lineage: skew_dp_union_mm PARTITION(k3=104).k1 EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +POSTHOOK: Lineage: skew_dp_union_mm PARTITION(k3=104).k2 EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +POSTHOOK: Lineage: skew_dp_union_mm PARTITION(k3=104).k4 EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +POSTHOOK: Lineage: skew_dp_union_mm PARTITION(k3=107).k1 EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +POSTHOOK: Lineage: skew_dp_union_mm PARTITION(k3=107).k2 EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +POSTHOOK: Lineage: skew_dp_union_mm PARTITION(k3=107).k4 EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +POSTHOOK: Lineage: skew_dp_union_mm PARTITION(k3=10).k1 EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +POSTHOOK: Lineage: skew_dp_union_mm PARTITION(k3=10).k2 EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +POSTHOOK: Lineage: skew_dp_union_mm PARTITION(k3=10).k4 EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +POSTHOOK: Lineage: skew_dp_union_mm PARTITION(k3=14).k1 EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +POSTHOOK: Lineage: skew_dp_union_mm PARTITION(k3=14).k2 EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +POSTHOOK: Lineage: skew_dp_union_mm PARTITION(k3=14).k4 EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +POSTHOOK: Lineage: skew_dp_union_mm PARTITION(k3=4).k1 EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +POSTHOOK: Lineage: skew_dp_union_mm PARTITION(k3=4).k2 EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +POSTHOOK: Lineage: skew_dp_union_mm PARTITION(k3=4).k4 EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +POSTHOOK: Lineage: skew_dp_union_mm PARTITION(k3=97).k1 EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +POSTHOOK: Lineage: skew_dp_union_mm PARTITION(k3=97).k2 EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +POSTHOOK: Lineage: skew_dp_union_mm PARTITION(k3=97).k4 EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +POSTHOOK: Lineage: skew_dp_union_mm PARTITION(k3=98).k1 EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +POSTHOOK: Lineage: skew_dp_union_mm PARTITION(k3=98).k2 EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +POSTHOOK: Lineage: skew_dp_union_mm PARTITION(k3=98).k4 EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +PREHOOK: query: select * from skew_dp_union_mm order by k2, k1, k4
 +PREHOOK: type: QUERY
 +PREHOOK: Input: default@skew_dp_union_mm
 +PREHOOK: Input: default@skew_dp_union_mm@k3=0
 +PREHOOK: Input: default@skew_dp_union_mm@k3=10
 +PREHOOK: Input: default@skew_dp_union_mm@k3=100
 +PREHOOK: Input: default@skew_dp_union_mm@k3=101
 +PREHOOK: Input: default@skew_dp_union_mm@k3=102
 +PREHOOK: Input: default@skew_dp_union_mm@k3=103
 +PREHOOK: Input: default@skew_dp_union_mm@k3=104
 +PREHOOK: Input: default@skew_dp_union_mm@k3=107
 +PREHOOK: Input: default@skew_dp_union_mm@k3=14
 +PREHOOK: Input: default@skew_dp_union_mm@k3=4
 +PREHOOK: Input: default@skew_dp_union_mm@k3=97
 +PREHOOK: Input: default@skew_dp_union_mm@k3=98
 +#### A masked pattern was here ####
 +POSTHOOK: query: select * from skew_dp_union_mm order by k2, k1, k4
 +POSTHOOK: type: QUERY
 +POSTHOOK: Input: default@skew_dp_union_mm
 +POSTHOOK: Input: default@skew_dp_union_mm@k3=0
 +POSTHOOK: Input: default@skew_dp_union_mm@k3=10
 +POSTHOOK: Input: default@skew_dp_union_mm@k3=100
 +POSTHOOK: Input: default@skew_dp_union_mm@k3=101
 +POSTHOOK: Input: default@skew_dp_union_mm@k3=102
 +POSTHOOK: Input: default@skew_dp_union_mm@k3=103
 +POSTHOOK: Input: default@skew_dp_union_mm@k3=104
 +POSTHOOK: Input: default@skew_dp_union_mm@k3=107
 +POSTHOOK: Input: default@skew_dp_union_mm@k3=14
 +POSTHOOK: Input: default@skew_dp_union_mm@k3=4
 +POSTHOOK: Input: default@skew_dp_union_mm@k3=97
 +POSTHOOK: Input: default@skew_dp_union_mm@k3=98
 +#### A masked pattern was here ####
 +0	0	0	0
 +1	2	3	4
 +10	10	10	10
 +11	12	13	14
 +97	97	97	97
 +98	98	98	98
 +98	99	100	101
 +99	100	101	102
 +100	100	100	100
 +101	102	103	104
 +103	103	103	103
 +104	105	106	107
 +PREHOOK: query: drop table skew_dp_union_mm
 +PREHOOK: type: DROPTABLE
 +PREHOOK: Input: default@skew_dp_union_mm
 +PREHOOK: Output: default@skew_dp_union_mm
 +POSTHOOK: query: drop table skew_dp_union_mm
 +POSTHOOK: type: DROPTABLE
 +POSTHOOK: Input: default@skew_dp_union_mm
 +POSTHOOK: Output: default@skew_dp_union_mm
 +PREHOOK: query: create table merge0_mm (id int) stored as orc tblproperties("transactional"="true", "transactional_properties"="insert_only")
 +PREHOOK: type: CREATETABLE
 +PREHOOK: Output: database:default
 +PREHOOK: Output: default@merge0_mm
 +POSTHOOK: query: create table merge0_mm (id int) stored as orc tblproperties("transactional"="true", "transactional_properties"="insert_only")
 +POSTHOOK: type: CREATETABLE
 +POSTHOOK: Output: database:default
 +POSTHOOK: Output: default@merge0_mm
 +PREHOOK: query: insert into table merge0_mm select key from intermediate
 +PREHOOK: type: QUERY
 +PREHOOK: Input: default@intermediate
 +PREHOOK: Input: default@intermediate@p=455
 +PREHOOK: Input: default@intermediate@p=456
 +PREHOOK: Input: default@intermediate@p=457
 +PREHOOK: Output: default@merge0_mm
 +POSTHOOK: query: insert into table merge0_mm select key from intermediate
 +POSTHOOK: type: QUERY
 +POSTHOOK: Input: default@intermediate
 +POSTHOOK: Input: default@intermediate@p=455
 +POSTHOOK: Input: default@intermediate@p=456
 +POSTHOOK: Input: default@intermediate@p=457
 +POSTHOOK: Output: default@merge0_mm
 +POSTHOOK: Lineage: merge0_mm.id SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +PREHOOK: query: select * from merge0_mm
 +PREHOOK: type: QUERY
 +PREHOOK: Input: default@merge0_mm
 +#### A masked pattern was here ####
 +POSTHOOK: query: select * from merge0_mm
 +POSTHOOK: type: QUERY
 +POSTHOOK: Input: default@merge0_mm
 +#### A masked pattern was here ####
 +98
 +97
 +0
 +10
 +100
 +103
 +PREHOOK: query: insert into table merge0_mm select key from intermediate
 +PREHOOK: type: QUERY
 +PREHOOK: Input: default@intermediate
 +PREHOOK: Input: default@intermediate@p=455
 +PREHOOK: Input: default@intermediate@p=456
 +PREHOOK: Input: default@intermediate@p=457
 +PREHOOK: Output: default@merge0_mm
 +POSTHOOK: query: insert into table merge0_mm select key from intermediate
 +POSTHOOK: type: QUERY
 +POSTHOOK: Input: default@intermediate
 +POSTHOOK: Input: default@intermediate@p=455
 +POSTHOOK: Input: default@intermediate@p=456
 +POSTHOOK: Input: default@intermediate@p=457
 +POSTHOOK: Output: default@merge0_mm
 +POSTHOOK: Lineage: merge0_mm.id SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +PREHOOK: query: select * from merge0_mm
 +PREHOOK: type: QUERY
 +PREHOOK: Input: default@merge0_mm
 +#### A masked pattern was here ####
 +POSTHOOK: query: select * from merge0_mm
 +POSTHOOK: type: QUERY
 +POSTHOOK: Input: default@merge0_mm
 +#### A masked pattern was here ####
 +98
 +97
 +0
 +10
 +100
 +103
 +98
 +97
 +0
 +10
 +100
 +103
 +PREHOOK: query: drop table merge0_mm
 +PREHOOK: type: DROPTABLE
 +PREHOOK: Input: default@merge0_mm
 +PREHOOK: Output: default@merge0_mm
 +POSTHOOK: query: drop table merge0_mm
 +POSTHOOK: type: DROPTABLE
 +POSTHOOK: Input: default@merge0_mm
 +POSTHOOK: Output: default@merge0_mm
 +PREHOOK: query: create table merge2_mm (id int) tblproperties("transactional"="true", "transactional_properties"="insert_only")
 +PREHOOK: type: CREATETABLE
 +PREHOOK: Output: database:default
 +PREHOOK: Output: default@merge2_mm
 +POSTHOOK: query: create table merge2_mm (id int) tblproperties("transactional"="true", "transactional_properties"="insert_only")
 +POSTHOOK: type: CREATETABLE
 +POSTHOOK: Output: database:default
 +POSTHOOK: Output: default@merge2_mm
 +PREHOOK: query: insert into table merge2_mm select key from intermediate
 +PREHOOK: type: QUERY
 +PREHOOK: Input: default@intermediate
 +PREHOOK: Input: default@intermediate@p=455
 +PREHOOK: Input: default@intermediate@p=456
 +PREHOOK: Input: default@intermediate@p=457
 +PREHOOK: Output: default@merge2_mm
 +POSTHOOK: query: insert into table merge2_mm select key from intermediate
 +POSTHOOK: type: QUERY
 +POSTHOOK: Input: default@intermediate
 +POSTHOOK: Input: default@intermediate@p=455
 +POSTHOOK: Input: default@intermediate@p=456
 +POSTHOOK: Input: default@intermediate@p=457
 +POSTHOOK: Output: default@merge2_mm
 +POSTHOOK: Lineage: merge2_mm.id SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +PREHOOK: query: select * from merge2_mm
 +PREHOOK: type: QUERY
 +PREHOOK: Input: default@merge2_mm
 +#### A masked pattern was here ####
 +POSTHOOK: query: select * from merge2_mm
 +POSTHOOK: type: QUERY
 +POSTHOOK: Input: default@merge2_mm
 +#### A masked pattern was here ####
 +98
 +97
 +0
 +10
 +100
 +103
 +PREHOOK: query: insert into table merge2_mm select key from intermediate
 +PREHOOK: type: QUERY
 +PREHOOK: Input: default@intermediate
 +PREHOOK: Input: default@intermediate@p=455
 +PREHOOK: Input: default@intermediate@p=456
 +PREHOOK: Input: default@intermediate@p=457
 +PREHOOK: Output: default@merge2_mm
 +POSTHOOK: query: insert into table merge2_mm select key from intermediate
 +POSTHOOK: type: QUERY
 +POSTHOOK: Input: default@intermediate
 +POSTHOOK: Input: default@intermediate@p=455
 +POSTHOOK: Input: default@intermediate@p=456
 +POSTHOOK: Input: default@intermediate@p=457
 +POSTHOOK: Output: default@merge2_mm
 +POSTHOOK: Lineage: merge2_mm.id SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +PREHOOK: query: select * from merge2_mm
 +PREHOOK: type: QUERY
 +PREHOOK: Input: default@merge2_mm
 +#### A masked pattern was here ####
 +POSTHOOK: query: select * from merge2_mm
 +POSTHOOK: type: QUERY
 +POSTHOOK: Input: default@merge2_mm
 +#### A masked pattern was here ####
 +98
 +97
 +0
 +10
 +100
 +103
 +98
 +97
 +0
 +10
 +100
 +103
 +PREHOOK: query: drop table merge2_mm
 +PREHOOK: type: DROPTABLE
 +PREHOOK: Input: default@merge2_mm
 +PREHOOK: Output: default@merge2_mm
 +POSTHOOK: query: drop table merge2_mm
 +POSTHOOK: type: DROPTABLE
 +POSTHOOK: Input: default@merge2_mm
 +POSTHOOK: Output: default@merge2_mm
 +PREHOOK: query: create table merge1_mm (id int) partitioned by (key int) stored as orc tblproperties("transactional"="true", "transactional_properties"="insert_only")
 +PREHOOK: type: CREATETABLE
 +PREHOOK: Output: database:default
 +PREHOOK: Output: default@merge1_mm
 +POSTHOOK: query: create table merge1_mm (id int) partitioned by (key int) stored as orc tblproperties("transactional"="true", "transactional_properties"="insert_only")
 +POSTHOOK: type: CREATETABLE
 +POSTHOOK: Output: database:default
 +POSTHOOK: Output: default@merge1_mm
 +PREHOOK: query: insert into table merge1_mm partition (key) select key, key from intermediate
 +PREHOOK: type: QUERY
 +PREHOOK: Input: default@intermediate
 +PREHOOK: Input: default@intermediate@p=455
 +PREHOOK: Input: default@intermediate@p=456
 +PREHOOK: Input: default@intermediate@p=457
 +PREHOOK: Output: default@merge1_mm
 +POSTHOOK: query: insert into table merge1_mm partition (key) select key, key from intermediate
 +POSTHOOK: type: QUERY
 +POSTHOOK: Input: default@intermediate
 +POSTHOOK: Input: default@intermediate@p=455
 +POSTHOOK: Input: default@intermediate@p=456
 +POSTHOOK: Input: default@intermediate@p=457
 +POSTHOOK: Output: default@merge1_mm@key=0
 +POSTHOOK: Output: default@merge1_mm@key=10
 +POSTHOOK: Output: default@merge1_mm@key=100
 +POSTHOOK: Output: default@merge1_mm@key=103
 +POSTHOOK: Output: default@merge1_mm@key=97
 +POSTHOOK: Output: default@merge1_mm@key=98
 +POSTHOOK: Lineage: merge1_mm PARTITION(key=0).id SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +POSTHOOK: Lineage: merge1_mm PARTITION(key=100).id SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +POSTHOOK: Lineage: merge1_mm PARTITION(key=103).id SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +POSTHOOK: Lineage: merge1_mm PARTITION(key=10).id SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +POSTHOOK: Lineage: merge1_mm PARTITION(key=97).id SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +POSTHOOK: Lineage: merge1_mm PARTITION(key=98).id SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +PREHOOK: query: select * from merge1_mm order by id, key
 +PREHOOK: type: QUERY
 +PREHOOK: Input: default@merge1_mm
 +PREHOOK: Input: default@merge1_mm@key=0
 +PREHOOK: Input: default@merge1_mm@key=10
 +PREHOOK: Input: default@merge1_mm@key=100
 +PREHOOK: Input: default@merge1_mm@key=103
 +PREHOOK: Input: default@merge1_mm@key=97
 +PREHOOK: Input: default@merge1_mm@key=98
 +#### A masked pattern was here ####
 +POSTHOOK: query: select * from merge1_mm order by id, key
 +POSTHOOK: type: QUERY
 +POSTHOOK: Input: default@merge1_mm
 +POSTHOOK: Input: default@merge1_mm@key=0
 +POSTHOOK: Input: default@merge1_mm@key=10
 +POSTHOOK: Input: default@merge1_mm@key=100
 +POSTHOOK: Input: default@merge1_mm@key=103
 +POSTHOOK: Input: default@merge1_mm@key=97
 +POSTHOOK: Input: default@merge1_mm@key=98
 +#### A masked pattern was here ####
 +0	0
 +10	10
 +97	97
 +98	98
 +100	100
 +103	103
 +PREHOOK: query: insert into table merge1_mm partition (key) select key, key from intermediate
 +PREHOOK: type: QUERY
 +PREHOOK: Input: default@intermediate
 +PREHOOK: Input: default@intermediate@p=455
 +PREHOOK: Input: default@intermediate@p=456
 +PREHOOK: Input: default@intermediate@p=457
 +PREHOOK: Output: default@merge1_mm
 +POSTHOOK: query: insert into table merge1_mm partition (key) select key, key from intermediate
 +POSTHOOK: type: QUERY
 +POSTHOOK: Input: default@intermediate
 +POSTHOOK: Input: default@intermediate@p=455
 +POSTHOOK: Input: default@intermediate@p=456
 +POSTHOOK: Input: default@intermediate@p=457
 +POSTHOOK: Output: default@merge1_mm@key=0
 +POSTHOOK: Output: default@merge1_mm@key=10
 +POSTHOOK: Output: default@merge1_mm@key=100
 +POSTHOOK: Output: default@merge1_mm@key=103
 +POSTHOOK: Output: default@merge1_mm@key=97
 +POSTHOOK: Output: default@merge1_mm@key=98
 +POSTHOOK: Lineage: merge1_mm PARTITION(key=0).id SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +POSTHOOK: Lineage: merge1_mm PARTITION(key=100).id SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +POSTHOOK: Lineage: merge1_mm PARTITION(key=103).id SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +POSTHOOK: Lineage: merge1_mm PARTITION(key=10).id SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +POSTHOOK: Lineage: merge1_mm PARTITION(key=97).id SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +POSTHOOK: Lineage: merge1_mm PARTITION(key=98).id SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +PREHOOK: query: select * from merge1_mm order by id, key
 +PREHOOK: type: QUERY
 +PREHOOK: Input: default@merge1_mm
 +PREHOOK: Input: default@merge1_mm@key=0
 +PREHOOK: Input: default@merge1_mm@key=10
 +PREHOOK: Input: default@merge1_mm@key=100
 +PREHOOK: Input: default@merge1_mm@key=103
 +PREHOOK: Input: default@merge1_mm@key=97
 +PREHOOK: Input: default@merge1_mm@key=98
 +#### A masked pattern was here ####
 +POSTHOOK: query: select * from merge1_mm order by id, key
 +POSTHOOK: type: QUERY
 +POSTHOOK: Input: default@merge1_mm
 +POSTHOOK: Input: default@merge1_mm@key=0
 +POSTHOOK: Input: default@merge1_mm@key=10
 +POSTHOOK: Input: default@merge1_mm@key=100
 +POSTHOOK: Input: default@merge1_mm@key=103
 +POSTHOOK: Input: default@merge1_mm@key=97
 +POSTHOOK: Input: default@merge1_mm@key=98
 +#### A masked pattern was here ####
 +0	0
 +0	0
 +10	10
 +10	10
 +97	97
 +97	97
 +98	98
 +98	98
 +100	100
 +100	100
 +103	103
 +103	103
 +PREHOOK: query: drop table merge1_mm
 +PREHOOK: type: DROPTABLE
 +PREHOOK: Input: default@merge1_mm
 +PREHOOK: Output: default@merge1_mm
 +POSTHOOK: query: drop table merge1_mm
 +POSTHOOK: type: DROPTABLE
 +POSTHOOK: Input: default@merge1_mm
 +POSTHOOK: Output: default@merge1_mm
 +PREHOOK: query: drop table ctas0_mm
 +PREHOOK: type: DROPTABLE
 +POSTHOOK: query: drop table ctas0_mm
 +POSTHOOK: type: DROPTABLE
 +PREHOOK: query: create table ctas0_mm tblproperties ("transactional"="true", "transactional_properties"="insert_only") as select * from intermediate
 +PREHOOK: type: CREATETABLE_AS_SELECT
 +PREHOOK: Input: default@intermediate
 +PREHOOK: Input: default@intermediate@p=455
 +PREHOOK: Input: default@intermediate@p=456
 +PREHOOK: Input: default@intermediate@p=457
 +PREHOOK: Output: database:default
 +PREHOOK: Output: default@ctas0_mm
 +POSTHOOK: query: create table ctas0_mm tblproperties ("transactional"="true", "transactional_properties"="insert_only") as select * from intermediate
 +POSTHOOK: type: CREATETABLE_AS_SELECT
 +POSTHOOK: Input: default@intermediate
 +POSTHOOK: Input: default@intermediate@p=455
 +POSTHOOK: Input: default@intermediate@p=456
 +POSTHOOK: Input: default@intermediate@p=457
 +POSTHOOK: Output: database:default
 +POSTHOOK: Output: default@ctas0_mm
 +POSTHOOK: Lineage: ctas0_mm.key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +POSTHOOK: Lineage: ctas0_mm.p SIMPLE [(intermediate)intermediate.FieldSchema(name:p, type:int, comment:null), ]
 +PREHOOK: query: select * from ctas0_mm
 +PREHOOK: type: QUERY
 +PREHOOK: Input: default@ctas0_mm
 +#### A masked pattern was here ####
 +POSTHOOK: query: select * from ctas0_mm
 +POSTHOOK: type: QUERY
 +POSTHOOK: Input: default@ctas0_mm
 +#### A masked pattern was here ####
 +98	455
 +97	455
 +0	456
 +10	456
 +100	457
 +103	457
 +PREHOOK: query: drop table ctas0_mm
 +PREHOOK: type: DROPTABLE
 +PREHOOK: Input: default@ctas0_mm
 +PREHOOK: Output: default@ctas0_mm
 +POSTHOOK: query: drop table ctas0_mm
 +POSTHOOK: type: DROPTABLE
 +POSTHOOK: Input: default@ctas0_mm
 +POSTHOOK: Output: default@ctas0_mm
 +PREHOOK: query: drop table ctas1_mm
 +PREHOOK: type: DROPTABLE
 +POSTHOOK: query: drop table ctas1_mm
 +POSTHOOK: type: DROPTABLE
 +PREHOOK: query: create table ctas1_mm tblproperties ("transactional"="true", "transactional_properties"="insert_only") as
 +  select * from intermediate union all select * from intermediate
 +PREHOOK: type: CREATETABLE_AS_SELECT
 +PREHOOK: Input: default@intermediate
 +PREHOOK: Input: default@intermediate@p=455
 +PREHOOK: Input: default@intermediate@p=456
 +PREHOOK: Input: default@intermediate@p=457
 +PREHOOK: Output: database:default
 +PREHOOK: Output: default@ctas1_mm
 +POSTHOOK: query: create table ctas1_mm tblproperties ("transactional"="true", "transactional_properties"="insert_only") as
 +  select * from intermediate union all select * from intermediate
 +POSTHOOK: type: CREATETABLE_AS_SELECT
 +POSTHOOK: Input: default@intermediate
 +POSTHOOK: Input: default@intermediate@p=455
 +POSTHOOK: Input: default@intermediate@p=456
 +POSTHOOK: Input: default@intermediate@p=457
 +POSTHOOK: Output: database:default
 +POSTHOOK: Output: default@ctas1_mm
 +POSTHOOK: Lineage: ctas1_mm.key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +POSTHOOK: Lineage: ctas1_mm.p EXPRESSION [(intermediate)intermediate.FieldSchema(name:p, type:int, comment:null), ]
 +PREHOOK: query: select * from ctas1_mm
 +PREHOOK: type: QUERY
 +PREHOOK: Input: default@ctas1_mm
 +#### A masked pattern was here ####
 +POSTHOOK: query: select * from ctas1_mm
 +POSTHOOK: type: QUERY
 +POSTHOOK: Input: default@ctas1_mm
 +#### A masked pattern was here ####
 +98	455
 +98	455
 +97	455
 +97	455
 +0	456
 +0	456
 +10	456
 +10	456
 +100	457
 +100	457
 +103	457
 +103	457
 +PREHOOK: query: drop table ctas1_mm
 +PREHOOK: type: DROPTABLE
 +PREHOOK: Input: default@ctas1_mm
 +PREHOOK: Output: default@ctas1_mm
 +POSTHOOK: query: drop table ctas1_mm
 +POSTHOOK: type: DROPTABLE
 +POSTHOOK: Input: default@ctas1_mm
 +POSTHOOK: Output: default@ctas1_mm
 +PREHOOK: query: drop table iow0_mm
 +PREHOOK: type: DROPTABLE
 +POSTHOOK: query: drop table iow0_mm
 +POSTHOOK: type: DROPTABLE
 +PREHOOK: query: create table iow0_mm(key int) tblproperties("transactional"="true", "transactional_properties"="insert_only")
 +PREHOOK: type: CREATETABLE
 +PREHOOK: Output: database:default
 +PREHOOK: Output: default@iow0_mm
 +POSTHOOK: query: create table iow0_mm(key int) tblproperties("transactional"="true", "transactional_properties"="insert_only")
 +POSTHOOK: type: CREATETABLE
 +POSTHOOK: Output: database:default
 +POSTHOOK: Output: default@iow0_mm
 +PREHOOK: query: insert overwrite table iow0_mm select key from intermediate
 +PREHOOK: type: QUERY
 +PREHOOK: Input: default@intermediate
 +PREHOOK: Input: default@intermediate@p=455
 +PREHOOK: Input: default@intermediate@p=456
 +PREHOOK: Input: default@intermediate@p=457
 +PREHOOK: Output: default@iow0_mm
 +POSTHOOK: query: insert overwrite table iow0_mm select key from intermediate
 +POSTHOOK: type: QUERY
 +POSTHOOK: Input: default@intermediate
 +POSTHOOK: Input: default@intermediate@p=455
 +POSTHOOK: Input: default@intermediate@p=456
 +POSTHOOK: Input: default@intermediate@p=457
 +POSTHOOK: Output: default@iow0_mm
 +POSTHOOK: Lineage: iow0_mm.key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +PREHOOK: query: insert into table iow0_mm select key + 1 from intermediate
 +PREHOOK: type: QUERY
 +PREHOOK: Input: default@intermediate
 +PREHOOK: Input: default@intermediate@p=455
 +PREHOOK: Input: default@intermediate@p=456
 +PREHOOK: Input: default@intermediate@p=457
 +PREHOOK: Output: default@iow0_mm
 +POSTHOOK: query: insert into table iow0_mm select key + 1 from intermediate
 +POSTHOOK: type: QUERY
 +POSTHOOK: Input: default@intermediate
 +POSTHOOK: Input: default@intermediate@p=455
 +POSTHOOK: Input: default@intermediate@p=456
 +POSTHOOK: Input: default@intermediate@p=457
 +POSTHOOK: Output: default@iow0_mm
 +POSTHOOK: Lineage: iow0_mm.key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +PREHOOK: query: select * from iow0_mm order by key
 +PREHOOK: type: QUERY
 +PREHOOK: Input: default@iow0_mm
 +#### A masked pattern was here ####
 +POSTHOOK: query: select * from iow0_mm order by key
 +POSTHOOK: type: QUERY
 +POSTHOOK: Input: default@iow0_mm
 +#### A masked pattern was here ####
 +0
 +1
 +10
 +11
 +97
 +98
 +98
 +99
 +100
 +101
 +103
 +104
 +PREHOOK: query: insert overwrite table iow0_mm select key + 2 from intermediate
 +PREHOOK: type: QUERY
 +PREHOOK: Input: default@intermediate
 +PREHOOK: Input: default@intermediate@p=455
 +PREHOOK: Input: default@intermediate@p=456
 +PREHOOK: Input: default@intermediate@p=457
 +PREHOOK: Output: default@iow0_mm
 +POSTHOOK: query: insert overwrite table iow0_mm select key + 2 from intermediate
 +POSTHOOK: type: QUERY
 +POSTHOOK: Input: default@intermediate
 +POSTHOOK: Input: default@intermediate@p=455
 +POSTHOOK: Input: default@intermediate@p=456
 +POSTHOOK: Input: default@intermediate@p=457
 +POSTHOOK: Output: default@iow0_mm
 +POSTHOOK: Lineage: iow0_mm.key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +PREHOOK: query: select * from iow0_mm order by key
 +PREHOOK: type: QUERY
 +PREHOOK: Input: default@iow0_mm
 +#### A masked pattern was here ####
 +POSTHOOK: query: select * from iow0_mm order by key
 +POSTHOOK: type: QUERY
 +POSTHOOK: Input: default@iow0_mm
 +#### A masked pattern was here ####
 +2
 +12
 +99
 +100
 +102
 +105
 +PREHOOK: query: drop table iow0_mm
 +PREHOOK: type: DROPTABLE
 +PREHOOK: Input: default@iow0_mm
 +PREHOOK: Output: default@iow0_mm
 +POSTHOOK: query: drop table iow0_mm
 +POSTHOOK: type: DROPTABLE
 +POSTHOOK: Input: default@iow0_mm
 +POSTHOOK: Output: default@iow0_mm
 +PREHOOK: query: drop table iow1_mm
 +PREHOOK: type: DROPTABLE
 +POSTHOOK: query: drop table iow1_mm
 +POSTHOOK: type: DROPTABLE
 +PREHOOK: query: create table iow1_mm(key int) partitioned by (key2 int)  tblproperties("transactional"="true", "transactional_properties"="insert_only")
 +PREHOOK: type: CREATETABLE
 +PREHOOK: Output: database:default
 +PREHOOK: Output: default@iow1_mm
 +POSTHOOK: query: create table iow1_mm(key int) partitioned by (key2 int)  tblproperties("transactional"="true", "transactional_properties"="insert_only")
 +POSTHOOK: type: CREATETABLE
 +POSTHOOK: Output: database:default
 +POSTHOOK: Output: default@iow1_mm
 +PREHOOK: query: insert overwrite table iow1_mm partition (key2)
 +select key as k1, key from intermediate union all select key as k1, key from intermediate
 +PREHOOK: type: QUERY
 +PREHOOK: Input: default@intermediate
 +PREHOOK: Input: default@intermediate@p=455
 +PREHOOK: Input: default@intermediate@p=456
 +PREHOOK: Input: default@intermediate@p=457
 +PREHOOK: Output: default@iow1_mm
 +POSTHOOK: query: insert overwrite table iow1_mm partition (key2)
 +select key as k1, key from intermediate union all select key as k1, key from intermediate
 +POSTHOOK: type: QUERY
 +POSTHOOK: Input: default@intermediate
 +POSTHOOK: Input: default@intermediate@p=455
 +POSTHOOK: Input: default@intermediate@p=456
 +POSTHOOK: Input: default@intermediate@p=457
 +POSTHOOK: Output: default@iow1_mm@key2=0
 +POSTHOOK: Output: default@iow1_mm@key2=10
 +POSTHOOK: Output: default@iow1_mm@key2=100
 +POSTHOOK: Output: default@iow1_mm@key2=103
 +POSTHOOK: Output: default@iow1_mm@key2=97
 +POSTHOOK: Output: default@iow1_mm@key2=98
 +POSTHOOK: Lineage: iow1_mm PARTITION(key2=0).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +POSTHOOK: Lineage: iow1_mm PARTITION(key2=100).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +POSTHOOK: Lineage: iow1_mm PARTITION(key2=103).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +POSTHOOK: Lineage: iow1_mm PARTITION(key2=10).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +POSTHOOK: Lineage: iow1_mm PARTITION(key2=97).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +POSTHOOK: Lineage: iow1_mm PARTITION(key2=98).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +PREHOOK: query: insert into table iow1_mm partition (key2)
 +select key + 1 as k1, key from intermediate union all select key as k1, key from intermediate
 +PREHOOK: type: QUERY
 +PREHOOK: Input: default@intermediate
 +PREHOOK: Input: default@intermediate@p=455
 +PREHOOK: Input: default@intermediate@p=456
 +PREHOOK: Input: default@intermediate@p=457
 +PREHOOK: Output: default@iow1_mm
 +POSTHOOK: query: insert into table iow1_mm partition (key2)
 +select key + 1 as k1, key from intermediate union all select key as k1, key from intermediate
 +POSTHOOK: type: QUERY
 +POSTHOOK: Input: default@intermediate
 +POSTHOOK: Input: default@intermediate@p=455
 +POSTHOOK: Input: default@intermediate@p=456
 +POSTHOOK: Input: default@intermediate@p=457
 +POSTHOOK: Output: default@iow1_mm@key2=0
 +POSTHOOK: Output: default@iow1_mm@key2=10
 +POSTHOOK: Output: default@iow1_mm@key2=100
 +POSTHOOK: Output: default@iow1_mm@key2=103
 +POSTHOOK: Output: default@iow1_mm@key2=97
 +POSTHOOK: Output: default@iow1_mm@key2=98
 +POSTHOOK: Lineage: iow1_mm PARTITION(key2=0).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +POSTHOOK: Lineage: iow1_mm PARTITION(key2=100).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +POSTHOOK: Lineage: iow1_mm PARTITION(key2=103).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +POSTHOOK: Lineage: iow1_mm PARTITION(key2=10).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +POSTHOOK: Lineage: iow1_mm PARTITION(key2=97).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +POSTHOOK: Lineage: iow1_mm PARTITION(key2=98).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +PREHOOK: query: select * from iow1_mm order by key, key2
 +PREHOOK: type: QUERY
 +PREHOOK: Input: default@iow1_mm
 +PREHOOK: Input: default@iow1_mm@key2=0
 +PREHOOK: Input: default@iow1_mm@key2=10
 +PREHOOK: Input: default@iow1_mm@key2=100
 +PREHOOK: Input: default@iow1_mm@key2=103
 +PREHOOK: Input: default@iow1_mm@key2=97
 +PREHOOK: Input: default@iow1_mm@key2=98
 +#### A masked pattern was here ####
 +POSTHOOK: query: select * from iow1_mm order by key, key2
 +POSTHOOK: type: QUERY
 +POSTHOOK: Input: default@iow1_mm
 +POSTHOOK: Input: default@iow1_mm@key2=0
 +POSTHOOK: Input: default@iow1_mm@key2=10
 +POSTHOOK: Input: default@iow1_mm@key2=100
 +POSTHOOK: Input: default@iow1_mm@key2=103
 +POSTHOOK: Input: default@iow1_mm@key2=97
 +POSTHOOK: Input: default@iow1_mm@key2=98
 +#### A masked pattern was here ####
 +0	0
 +0	0
 +0	0
 +1	0
 +10	10
 +10	10
 +10	10
 +11	10
 +97	97
 +97	97
 +97	97
 +98	97
 +98	98
 +98	98
 +98	98
 +99	98
 +100	100
 +100	100
 +100	100
 +101	100
 +103	103
 +103	103
 +103	103
 +104	103
 +PREHOOK: query: insert overwrite table iow1_mm partition (key2)
 +select key + 3 as k1, key from intermediate union all select key + 4 as k1, key from intermediate
 +PREHOOK: type: QUERY
 +PREHOOK: Input: default@intermediate
 +PREHOOK: Input: default@intermediate@p=455
 +PREHOOK: Input: default@intermediate@p=456
 +PREHOOK: Input: default@intermediate@p=457
 +PREHOOK: Output: default@iow1_mm
 +POSTHOOK: query: insert overwrite table iow1_mm partition (key2)
 +select key + 3 as k1, key from intermediate union all select key + 4 as k1, key from intermediate
 +POSTHOOK: type: QUERY
 +POSTHOOK: Input: default@intermediate
 +POSTHOOK: Input: default@intermediate@p=455
 +POSTHOOK: Input: default@intermediate@p=456
 +POSTHOOK: Input: default@intermediate@p=457
 +POSTHOOK: Output: default@iow1_mm@key2=0
 +POSTHOOK: Output: default@iow1_mm@key2=10
 +POSTHOOK: Output: default@iow1_mm@key2=100
 +POSTHOOK: Output: default@iow1_mm@key2=103
 +POSTHOOK: Output: default@iow1_mm@key2=97
 +POSTHOOK: Output: default@iow1_mm@key2=98
 +POSTHOOK: Lineage: iow1_mm PARTITION(key2=0).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +POSTHOOK: Lineage: iow1_mm PARTITION(key2=100).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +POSTHOOK: Lineage: iow1_mm PARTITION(key2=103).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +POSTHOOK: Lineage: iow1_mm PARTITION(key2=10).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +POSTHOOK: Lineage: iow1_mm PARTITION(key2=97).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +POSTHOOK: Lineage: iow1_mm PARTITION(key2=98).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +PREHOOK: query: select * from iow1_mm order by key, key2
 +PREHOOK: type: QUERY
 +PREHOOK: Input: default@iow1_mm
 +PREHOOK: Input: default@iow1_mm@key2=0
 +PREHOOK: Input: default@iow1_mm@key2=10
 +PREHOOK: Input: default@iow1_mm@key2=100
 +PREHOOK: Input: default@iow1_mm@key2=103
 +PREHOOK: Input: default@iow1_mm@key2=97
 +PREHOOK: Input: default@iow1_mm@key2=98
 +#### A masked pattern was here ####
 +POSTHOOK: query: select * from iow1_mm order by key, key2
 +POSTHOOK: type: QUERY
 +POSTHOOK: Input: default@iow1_mm
 +POSTHOOK: Input: default@iow1_mm@key2=0
 +POSTHOOK: Input: default@iow1_mm@key2=10
 +POSTHOOK: Input: default@iow1_mm@key2=100
 +POSTHOOK: Input: default@iow1_mm@key2=103
 +POSTHOOK: Input: default@iow1_mm@key2=97
 +POSTHOOK: Input: default@iow1_mm@key2=98
 +#### A masked pattern was here ####
 +3	0
 +4	0
 +13	10
 +14	10
 +100	97
 +101	97
 +101	98
 +102	98
 +103	100
 +104	100
 +106	103
 +107	103
 +PREHOOK: query: insert overwrite table iow1_mm partition (key2)
 +select key + 3 as k1, key + 3 from intermediate union all select key + 2 as k1, key + 2 from intermediate
 +PREHOOK: type: QUERY
 +PREHOOK: Input: default@intermediate
 +PREHOOK: Input: default@intermediate@p=455
 +PREHOOK: Input: default@intermediate@p=456
 +PREHOOK: Input: default@intermediate@p=457
 +PREHOOK: Output: default@iow1_mm
 +POSTHOOK: query: insert overwrite table iow1_mm partition (key2)
 +select key + 3 as k1, key + 3 from intermediate union all select key + 2 as k1, key + 2 from intermediate
 +POSTHOOK: type: QUERY
 +POSTHOOK: Input: default@intermediate
 +POSTHOOK: Input: default@intermediate@p=455
 +POSTHOOK: Input: default@intermediate@p=456
 +POSTHOOK: Input: default@intermediate@p=457
 +POSTHOOK: Output: default@iow1_mm@key2=100
 +POSTHOOK: Output: default@iow1_mm@key2=101
 +POSTHOOK: Output: default@iow1_mm@key2=102
 +POSTHOOK: Output: default@iow1_mm@key2=103
 +POSTHOOK: Output: default@iow1_mm@key2=105
 +POSTHOOK: Output: default@iow1_mm@key2=106
 +POSTHOOK: Output: default@iow1_mm@key2=12
 +POSTHOOK: Output: default@iow1_mm@key2=13
 +POSTHOOK: Output: default@iow1_mm@key2=2
 +POSTHOOK: Output: default@iow1_mm@key2=3
 +POSTHOOK: Output: default@iow1_mm@key2=99
 +POSTHOOK: Lineage: iow1_mm PARTITION(key2=100).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +POSTHOOK: Lineage: iow1_mm PARTITION(key2=101).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +POSTHOOK: Lineage: iow1_mm PARTITION(key2=102).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +POSTHOOK: Lineage: iow1_mm PARTITION(key2=103).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +POSTHOOK: Lineage: iow1_mm PARTITION(key2=105).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +POSTHOOK: Lineage: iow1_mm PARTITION(key2=106).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +POSTHOOK: Lineage: iow1_mm PARTITION(key2=12).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +POSTHOOK: Lineage: iow1_mm PARTITION(key2=13).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +POSTHOOK: Lineage: iow1_mm PARTITION(key2=2).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +POSTHOOK: Lineage: iow1_mm PARTITION(key2=3).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +POSTHOOK: Lineage: iow1_mm PARTITION(key2=99).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +PREHOOK: query: select * from iow1_mm order by key, key2
 +PREHOOK: type: QUERY
 +PREHOOK: Input: default@iow1_mm
 +PREHOOK: Input: default@iow1_mm@key2=0
 +PREHOOK: Input: default@iow1_mm@key2=10
 +PREHOOK: Input: default@iow1_mm@key2=100
 +PREHOOK: Input: default@iow1_mm@key2=101
 +PREHOOK: Input: default@iow1_mm@key2=102
 +PREHOOK: Input: default@iow1_mm@key2=103
 +PREHOOK: Input: default@iow1_mm@key2=105
 +PREHOOK: Input: default@iow1_mm@key2=106
 +PREHOOK: Input: default@iow1_mm@key2=12
 +PREHOOK: Input: default@iow1_mm@key2=13
 +PREHOOK: Input: default@iow1_mm@key2=2
 +PREHOOK: Input: default@iow1_mm@key2=3
 +PREHOOK: Input: default@iow1_mm@key2=97
 +PREHOOK: Input: default@iow1_mm@key2=98
 +PREHOOK: Input: default@iow1_mm@key2=99
 +#### A masked pattern was here ####
 +POSTHOOK: query: select * from iow1_mm order by key, key2
 +POSTHOOK: type: QUERY
 +POSTHOOK: Input: default@iow1_mm
 +POSTHOOK: Input: default@iow1_mm@key2=0
 +POSTHOOK: Input: default@iow1_mm@key2=10
 +POSTHOOK: Input: default@iow1_mm@key2=100
 +POSTHOOK: Input: default@iow1_mm@key2=101
 +POSTHOOK: Input: default@iow1_mm@key2=102
 +POSTHOOK: Input: default@iow1_mm@key2=103
 +POSTHOOK: Input: default@iow1_mm@key2=105
 +POSTHOOK: Input: default@iow1_mm@key2=106
 +POSTHOOK: Input: default@iow1_mm@key2=12
 +POSTHOOK: Input: default@iow1_mm@key2=13
 +POSTHOOK: Input: default@iow1_mm@key2=2
 +POSTHOOK: Input: default@iow1_mm@key2=3
 +POSTHOOK: Input: default@iow1_mm@key2=97
 +POSTHOOK: Input: default@iow1_mm@key2=98
 +POSTHOOK: Input: default@iow1_mm@key2=99
 +#### A masked pattern was here ####
 +2	2
 +3	0
 +3	3
 +4	0
 +12	12
 +13	10
 +13	13
 +14	10
 +99	99
 +100	97
 +100	100
 +100	100
 +101	97
 +101	98
 +101	101
 +102	98
 +102	102
 +103	103
 +105	105
 +106	106
 +PREHOOK: query: drop table iow1_mm
 +PREHOOK: type: DROPTABLE
 +PREHOOK: Input: default@iow1_mm
 +PREHOOK: Output: default@iow1_mm
 +POSTHOOK: query: drop table iow1_mm
 +POSTHOOK: type: DROPTABLE
 +POSTHOOK: Input: default@iow1_mm
 +POSTHOOK: Output: default@iow1_mm
 +PREHOOK: query: drop table load0_mm
 +PREHOOK: type: DROPTABLE
 +POSTHOOK: query: drop table load0_mm
 +POSTHOOK: type: DROPTABLE
 +PREHOOK: query: create table load0_mm (key string, value string) stored as textfile tblproperties("transactional"="true", "transactional_properties"="insert_only")
 +PREHOOK: type: CREATETABLE
 +PREHOOK: Output: database:default
 +PREHOOK: Output: default@load0_mm
 +POSTHOOK: query: create table load0_mm (key string, value string) stored as textfile tblproperties("transactional"="true", "transactional_properties"="insert_only")
 +POSTHOOK: type: CREATETABLE
 +POSTHOOK: Output: database:default
 +POSTHOOK: Output: default@load0_mm
 +PREHOOK: query: load data local inpath '../../data/files/kv1.txt' into table load0_mm
 +PREHOOK: type: LOAD
 +#### A masked pattern was here ####
 +PREHOOK: Output: default@load0_mm
 +POSTHOOK: query: load data local inpath '../../data/files/kv1.txt' into table load0_mm
 +POSTHOOK: type: LOAD
 +#### A masked pattern was here ####
 +POSTHOOK: Output: default@load0_mm
 +PREHOOK: query: select count(1) from load0_mm
 +PREHOOK: type: QUERY
 +PREHOOK: Input: default@load0_mm
 +#### A masked pattern was here ####
 +POSTHOOK: query: select count(1) from load0_mm
 +POSTHOOK: type: QUERY
 +POSTHOOK: Input: default@load0_mm
 +#### A masked pattern was here ####
 +500
 +PREHOOK: query: load data local inpath '../../data/files/kv2.txt' into table load0_mm
 +PREHOOK: type: LOAD
 +#### A masked pattern was here ####
 +PREHOOK: Output: default@load0_mm
 +POSTHOOK: query: load data local inpath '../../data/files/kv2.txt' into table load0_mm
 +POSTHOOK: type: LOAD
 +#### A masked pattern was here ####
 +POSTHOOK: Output: default@load0_mm
 +PREHOOK: query: select count(1) from load0_mm
 +PREHOOK: type: QUERY
 +PREHOOK: Input: default@load0_mm
 +#### A masked pattern was here ####
 +POSTHOOK: query: select count(1) from load0_mm
 +POSTHOOK: type: QUERY
 +POSTHOOK: Input: default@load0_mm
 +#### A masked pattern was here ####
 +1000
 +PREHOOK: query: load data local inpath '../../data/files/kv2.txt' overwrite into table load0_mm
 +PREHOOK: type: LOAD
 +#### A masked pattern was here ####
 +PREHOOK: Output: default@load0_mm
 +POSTHOOK: query: load data local inpath '../../data/files/kv2.txt' overwrite into table load0_mm
 +POSTHOOK: type: LOAD
 +#### A masked pattern was here ####
 +POSTHOOK: Output: default@load0_mm
 +PREHOOK: query: select count(1) from load0_mm
 +PREHOOK: type: QUERY
 +PREHOOK: Input: default@load0_mm
 +#### A masked pattern was here ####
 +POSTHOOK: query: select count(1) from load0_mm
 +POSTHOOK: type: QUERY
 +POSTHOOK: Input: default@load0_mm
 +#### A masked pattern was here ####
 +500
 +PREHOOK: query: drop table load0_mm
 +PREHOOK: type: DROPTABLE
 +PREHOOK: Input: default@load0_mm
 +PREHOOK: Output: default@load0_mm
 +POSTHOOK: query: drop table load0_mm
 +POSTHOOK: type: DROPTABLE
 +POSTHOOK: Input: default@load0_mm
 +POSTHOOK: Output: default@load0_mm
 +PREHOOK: query: drop table intermediate2
 +PREHOOK: type: DROPTABLE
 +POSTHOOK: query: drop table intermediate2
 +POSTHOOK: type: DROPTABLE
 +PREHOOK: query: create table intermediate2 (key string, value string) stored as textfile
 +#### A masked pattern was here ####
 +PREHOOK: type: CREATETABLE
 +#### A masked pattern was here ####
 +PREHOOK: Output: database:default
 +PREHOOK: Output: default@intermediate2
 +POSTHOOK: query: create table intermediate2 (key string, value string) stored as textfile
 +#### A masked pattern was here ####
 +POSTHOOK: type: CREATETABLE
 +#### A masked pattern was here ####
 +POSTHOOK: Output: database:default
 +POSTHOOK: Output: default@intermediate2
 +PREHOOK: query: load data local inpath '../../data/files/kv1.txt' into table intermediate2
 +PREHOOK: type: LOAD
 +#### A masked pattern was here ####
 +PREHOOK: Output: default@intermediate2
 +POSTHOOK: query: load data local inpath '../../data/files/kv1.txt' into table intermediate2
 +POSTHOOK: type: LOAD
 +#### A masked pattern was here ####
 +POSTHOOK: Output: default@intermediate2
 +PREHOOK: query: load data local inpath '../../data/files/kv2.txt' into table intermediate2
 +PREHOOK: type: LOAD
 +#### A masked pattern was here ####
 +PREHOOK: Output: default@intermediate2
 +POSTHOOK: query: load data local inpath '../../data/files/kv2.txt' into table intermediate2
 +POSTHOOK: type: LOAD
 +#### A masked pattern was here ####
 +POSTHOOK: Output: default@intermediate2
 +PREHOOK: query: load data local inpath '../../data/files/kv3.txt' into table intermediate2
 +PREHOOK: type: LOAD
 +#### A masked pattern was here ####
 +PREHOOK: Output: default@intermediate2
 +POSTHOOK: query: load data local inpath '../../data/files/kv3.txt' into table intermediate2
 +POSTHOOK: type: LOAD
 +#### A masked pattern was here ####
 +POSTHOOK: Output: default@intermediate2
 +PREHOOK: query: drop table load1_mm
 +PREHOOK: type: DROPTABLE
 +POSTHOOK: query: drop table load1_mm
 +POSTHOOK: type: DROPTABLE
 +PREHOOK: query: create table load1_mm (key string, value string) stored as textfile tblproperties("transactional"="true", "transactional_properties"="insert_only")
 +PREHOOK: type: CREATETABLE
 +PREHOOK: Output: database:default
 +PREHOOK: Output: default@load1_mm
 +POSTHOOK: query: create table load1_mm (key string, value string) stored as textfile tblproperties("transactional"="true", "transactional_properties"="insert_only")
 +POSTHOOK: type: CREATETABLE
 +POSTHOOK: Output: database:default
 +POSTHOOK: Output: default@load1_mm
 +#### A masked pattern was here ####
 +PREHOOK: type: LOAD
 +#### A masked pattern was here ####
 +PREHOOK: Output: default@load1_mm
 +#### A masked pattern was here ####
 +POSTHOOK: type: LOAD
 +#### A masked pattern was here ####
 +POSTHOOK: Output: default@load1_mm
 +#### A masked pattern was here ####
 +PREHOOK: type: LOAD
 +#### A masked pattern was here ####
 +PREHOOK: Output: default@load1_mm
 +#### A masked pattern was here ####
 +POSTHOOK: type: LOAD
 +#### A masked pattern was here ####
 +POSTHOOK: Output: default@load1_mm
 +PREHOOK: query: select count(1) from load1_mm
 +PREHOOK: type: QUERY
 +PREHOOK: Input: default@load1_mm
 +#### A masked pattern was here ####
 +POSTHOOK: query: select count(1) from load1_mm
 +POSTHOOK: type: QUERY
 +POSTHOOK: Input: default@load1_mm
 +#### A masked pattern was here ####
 +1000
 +PREHOOK: query: load data local inpath '../../data/files/kv1.txt' into table intermediate2
 +PREHOOK: type: LOAD
 +#### A masked pattern was here ####
 +PREHOOK: Output: default@intermediate2
 +POSTHOOK: query: load data local inpath '../../data/files/kv1.txt' into table intermediate2
 +POSTHOOK: type: LOAD
 +#### A masked pattern was here ####
 +POSTHOOK: Output: default@intermediate2
 +PREHOOK: query: load data local inpath '../../data/files/kv2.txt' into table intermediate2
 +PREHOOK: type: LOAD
 +#### A masked pattern was here ####
 +PREHOOK: Output: default@intermediate2
 +POSTHOOK: query: load data local inpath '../../data/files/kv2.txt' into table intermediate2
 +POSTHOOK: type: LOAD
 +#### A masked pattern was here ####
 +POSTHOOK: Output: default@intermediate2
 +PREHOOK: query: load data local inpath '../../data/files/kv3.txt' into table intermediate2
 +PREHOOK: type: LOAD
 +#### A masked pattern was here ####
 +PREHOOK: Output: default@intermediate2
 +POSTHOOK: query: load data local inpath '../../data/files/kv3.txt' into table intermediate2
 +POSTHOOK: type: LOAD
 +#### A masked pattern was here ####
 +POSTHOOK: Output: default@intermediate2
 +#### A masked pattern was here ####
 +PREHOOK: type: LOAD
 +#### A masked pattern was here ####
 +PREHOOK: Output: default@load1_mm
 +#### A masked pattern was here ####
 +POSTHOOK: type: LOAD
 +#### A masked pattern was here ####
 +POSTHOOK: Output: default@load1_mm
 +PREHOOK: query: select count(1) from load1_mm
 +PREHOOK: type: QUERY
 +PREHOOK: Input: default@load1_mm
 +#### A masked pattern was here ####
 +POSTHOOK: query: select count(1) from load1_mm
 +POSTHOOK: type: QUERY
 +POSTHOOK: Input: default@load1_mm
 +#### A masked pattern was here ####
 +1050
 +PREHOOK: query: load data local inpath '../../data/files/kv2.txt' into table intermediate2
 +PREHOOK: type: LOAD
 +#### A masked pattern was here ####
 +PREHOOK: Output: default@intermediate2
 +POSTHOOK: query: load data local inpath '../../data/files/kv2.txt' into table intermediate2
 +POSTHOOK: type: LOAD
 +#### A masked pattern was here ####
 +POSTHOOK: Output: default@intermediate2
 +#### A masked pattern was here ####
 +PREHOOK: type: LOAD
 +#### A masked pattern was here ####
 +PREHOOK: Output: default@load1_mm
 +#### A masked pattern was here ####
 +POSTHOOK: type: LOAD
 +#### A masked pattern was here ####
 +POSTHOOK: Output: default@load1_mm
 +PREHOOK: query: select count(1) from load1_mm
 +PREHOOK: type: QUERY
 +PREHOOK: Input: default@load1_mm
 +#### A masked pattern was here ####
 +POSTHOOK: query: select count(1) from load1_mm
 +POSTHOOK: type: QUERY
 +POSTHOOK: Input: default@load1_mm
 +#### A masked pattern was here ####
 +500
 +PREHOOK: query: drop table load1_mm
 +PREHOOK: type: DROPTABLE
 +PREHOOK: Input: default@load1_mm
 +PREHOOK: Output: default@load1_mm
 +POSTHOOK: query: drop table load1_mm
 +POSTHOOK: type: DROPTABLE
 +POSTHOOK: Input: default@load1_mm
 +POSTHOOK: Output: default@load1_mm
 +PREHOOK: query: drop table load2_mm
 +PREHOOK: type: DROPTABLE
 +POSTHOOK: query: drop table load2_mm
 +POSTHOOK: type: DROPTABLE
 +PREHOOK: query: create table load2_mm (key string, value string)
 +  partitioned by (k int, l int) stored as textfile tblproperties("transactional"="true", "transactional_properties"="insert_only")
 +PREHOOK: type: CREATETABLE
 +PREHOOK: Output: database:default
 +PREHOOK: Output: default@load2_mm
 +POSTHOOK: query: create table load2_mm (key string, value string)
 +  partitioned by (k int, l int) stored as textfile tblproperties("transactional"="true", "transactional_properties"="insert_only")
 +POSTHOOK: type: CREATETABLE
 +POSTHOOK: Output: database:default
 +POSTHOOK: Output: default@load2_mm
 +PREHOOK: query: load data local inpath '../../data/files/kv1.txt' into table intermediate2
 +PREHOOK: type: LOAD
 +#### A masked pattern was here ####
 +PREHOOK: Output: default@intermediate2
 +POSTHOOK: query: load data local inpath '../../data/files/kv1.txt' into table intermediate2
 +POSTHOOK: type: LOAD
 +#### A masked pattern was here ####
 +POSTHOOK: Output: default@intermediate2
 +PREHOOK: query: load data local inpath '../../data/files/kv2.txt' into table intermediate2
 +PREHOOK: type: LOAD
 +#### A masked pattern was here ####
 +PREHOOK: Output: default@intermediate2
 +POSTHOOK: query: load data local inpath '../../data/files/kv2.txt' into table intermediate2
 +POSTHOOK: type: LOAD
 +#### A masked pattern was here ####
 +POSTHOOK: Output: default@intermediate2
 +PREHOOK: query: load data local inpath '../../data/files/kv3.txt' into table intermediate2
 +PREHOOK: type: LOAD
 +#### A masked pattern was here ####
 +PREHOOK: Output: default@intermediate2
 +POSTHOOK: query: load data local inpath '../../data/files/kv3.txt' into table intermediate2
 +POSTHOOK: type: LOAD
 +#### A masked pattern was here ####
 +POSTHOOK: Output: default@intermediate2
 +#### A masked pattern was here ####
 +PREHOOK: type: LOAD
 +#### A masked pattern was here ####
 +PREHOOK: Output: default@load2_mm
 +#### A masked pattern was here ####
 +POSTHOOK: type: LOAD
 +#### A masked pattern was here ####
 +POSTHOOK: Output: default@load2_mm
 +POSTHOOK: Output: default@load2_mm@k=5/l=5
 +PREHOOK: query: select count(1) from load2_mm
 +PREHOOK: type: QUERY
 +PREHOOK: Input: default@load2_mm
 +PREHOOK: Input: default@load2_mm@k=5/l=5
 +#### A masked pattern was here ####
 +POSTHOOK: query: select count(1) from load2_mm
 +POSTHOOK: type: QUERY
 +POSTHOOK: Input: default@load2_mm
 +POSTHOOK: Input: default@load2_mm@k=5/l=5
 +#### A masked pattern was here ####
 +1025
 +PREHOOK: query: drop table load2_mm
 +PREHOOK: type: DROPTABLE
 +PREHOOK: Input: default@load2_mm
 +PREHOOK: Output: default@load2_mm
 +POSTHOOK: query: drop table load2_mm
 +POSTHOOK: type: DROPTABLE
 +POSTHOOK: Input: default@load2_mm
 +POSTHOOK: Output: default@load2_mm
 +PREHOOK: query: drop table intermediate2
 +PREHOOK: type: DROPTABLE
 +PREHOOK: Input: default@intermediate2
 +PREHOOK: Output: default@intermediate2
 +POSTHOOK: query: drop table intermediate2
 +POSTHOOK: type: DROPTABLE
 +POSTHOOK: Input: default@intermediate2
 +POSTHOOK: Output: default@intermediate2
 +PREHOOK: query: drop table intermediate_nonpart
 +PREHOOK: type: DROPTABLE
 +POSTHOOK: query: drop table intermediate_nonpart
 +POSTHOOK: type: DROPTABLE
 +PREHOOK: query: drop table intermmediate_part
 +PREHOOK: type: DROPTABLE
 +POSTHOOK: query: drop table intermmediate_part
 +POSTHOOK: type: DROPTABLE
 +PREHOOK: query: drop table intermmediate_nonpart
 +PREHOOK: type: DROPTABLE
 +POSTHOOK: query: drop table intermmediate_nonpart
 +POSTHOOK: type: DROPTABLE
 +PREHOOK: query: create table intermediate_nonpart(key int, p int)
 +PREHOOK: type: CREATETABLE
 +PREHOOK: Output: database:default
 +PREHOOK: Output: default@intermediate_nonpart
 +POSTHOOK: query: create table intermediate_nonpart(key int, p int)
 +POSTHOOK: type: CREATETABLE
 +POSTHOOK: Output: database:default
 +POSTHOOK: Output: default@intermediate_nonpart
 +PREHOOK: query: insert into intermediate_nonpart select * from intermediate
 +PREHOOK: type: QUERY
 +PREHOOK: Input: default@intermediate
 +PREHOOK: Input: default@intermediate@p=455
 +PREHOOK: Input: default@intermediate@p=456
 +PREHOOK: Input: default@intermediate@p=457
 +PREHOOK: Output: default@intermediate_nonpart
 +POSTHOOK: query: insert into intermediate_nonpart select * from intermediate
 +POSTHOOK: type: QUERY
 +POSTHOOK: Input: default@intermediate
 +POSTHOOK: Input: default@intermediate@p=455
 +POSTHOOK: Input: default@intermediate@p=456
 +POSTHOOK: Input: default@intermediate@p=457
 +POSTHOOK: Output: default@intermediate_nonpart
 +POSTHOOK: Lineage: intermediate_nonpart.key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +POSTHOOK: Lineage: intermediate_nonpart.p SIMPLE [(intermediate)intermediate.FieldSchema(name:p, type:int, comment:null), ]
 +PREHOOK: query: create table intermmediate_nonpart(key int, p int) tblproperties("transactional"="true", "transactional_properties"="insert_only")
 +PREHOOK: type: CREATETABLE
 +PREHOOK: Output: database:default
 +PREHOOK: Output: default@intermmediate_nonpart
 +POSTHOOK: query: create table intermmediate_nonpart(key int, p int) tblproperties("transactional"="true", "transactional_properties"="insert_only")
 +POSTHOOK: type: CREATETABLE
 +POSTHOOK: Output: database:default
 +POSTHOOK: Output: default@intermmediate_nonpart
 +PREHOOK: query: insert into intermmediate_nonpart select * from intermediate
 +PREHOOK: type: QUERY
 +PREHOOK: Input: default@intermediate
 +PREHOOK: Input: default@intermediate@p=455
 +PREHOOK: Input: default@intermediate@p=456
 +PREHOOK: Input: default@intermediate@p=457
 +PREHOOK: Output: default@intermmediate_nonpart
 +POSTHOOK: query: insert into intermmediate_nonpart select * from intermediate
 +POSTHOOK: type: QUERY
 +POSTHOOK: Input: default@intermediate
 +POSTHOOK: Input: default@intermediate

<TRUNCATED>

[05/50] [abbrv] hive git commit: HIVE-16550: Semijoin Hints should be able to skip the optimization if needed (Deepak Jaiswal, reviewed by Jason Dere)

Posted by we...@apache.org.
HIVE-16550: Semijoin Hints should be able to skip the optimization if needed (Deepak Jaiswal, reviewed by Jason Dere)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/5d459665
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/5d459665
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/5d459665

Branch: refs/heads/hive-14535
Commit: 5d459665b6353756606cd0c2bf7c199170f912cc
Parents: d03261c
Author: Jason Dere <jd...@hortonworks.com>
Authored: Wed May 3 11:53:17 2017 -0700
Committer: Jason Dere <jd...@hortonworks.com>
Committed: Wed May 3 11:53:17 2017 -0700

----------------------------------------------------------------------
 .../DynamicPartitionPruningOptimization.java    |   21 +-
 .../calcite/translator/HiveOpConverter.java     |    7 +-
 .../hadoop/hive/ql/parse/CalcitePlanner.java    |   17 +-
 .../apache/hadoop/hive/ql/parse/HintParser.g    |    1 +
 .../hadoop/hive/ql/parse/ParseContext.java      |    9 +
 .../apache/hadoop/hive/ql/parse/ParseUtils.java |    1 +
 .../hadoop/hive/ql/parse/QBParseInfo.java       |    9 +
 .../hadoop/hive/ql/parse/SemanticAnalyzer.java  |  133 +-
 .../hadoop/hive/ql/parse/TaskCompiler.java      |    1 +
 .../hive/ql/plan/ExprNodeDynamicListDesc.java   |   10 +-
 .../apache/hadoop/hive/ql/plan/JoinDesc.java    |   17 -
 .../hive/ql/ppd/SyntheticJoinPredicate.java     |    4 +-
 .../test/queries/clientpositive/semijoin_hint.q |   45 +-
 .../clientpositive/llap/semijoin_hint.q.out     | 1841 +++++++++++++++++-
 14 files changed, 1988 insertions(+), 128 deletions(-)
----------------------------------------------------------------------
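A rough sketch, for orientation only, of what the new hint plumbing produces (the wrapper class, method name and alias "k" below are illustrative; SemiJoinHint, ParseContext.setSemiJoinHints() and the hint strings /*+ semi(k, str, 5000)*/ and /*+ semi(None)*/ all come from this patch and its semijoin_hint.q test):

    import java.util.HashMap;
    import java.util.Map;

    import org.apache.hadoop.hive.ql.parse.ParseContext;
    import org.apache.hadoop.hive.ql.parse.SemiJoinHint;

    public class SemiJoinHintSketch {
      // Illustrative only: roughly what parseSemiJoinHint() yields and how
      // ParseContext carries it to DynamicPartitionPruningOptimization.
      static void illustrate(ParseContext parseContext) {
        // /*+ semi(k, str, 5000)*/ : bloom-filter semijoin from alias k on column str, 5000 entries.
        Map<String, SemiJoinHint> hints = new HashMap<>();
        hints.put("k", new SemiJoinHint("k", "str", 5000));
        parseContext.setSemiJoinHints(hints);

        // /*+ semi(None)*/ on its own parses to an empty map; DynamicPartitionPruningOptimization
        // then sees size() == 0 and skips generateSemiJoinOperatorPlan(), i.e. no runtime filtering.
        parseContext.setSemiJoinHints(new HashMap<String, SemiJoinHint>());
      }
    }
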


http://git-wip-us.apache.org/repos/asf/hive/blob/5d459665/ql/src/java/org/apache/hadoop/hive/ql/optimizer/DynamicPartitionPruningOptimization.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/DynamicPartitionPruningOptimization.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/DynamicPartitionPruningOptimization.java
index e1a6952..b8c0102 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/DynamicPartitionPruningOptimization.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/DynamicPartitionPruningOptimization.java
@@ -220,14 +220,23 @@ public class DynamicPartitionPruningOptimization implements NodeProcessor {
             }
             String tableAlias = (op == null ? "" : ((TableScanOperator) op).getConf().getAlias());
 
-            Map<String, SemiJoinHint> hints = ctx.desc.getHints();
-            SemiJoinHint sjHint = (hints != null) ? hints.get(tableAlias) : null;
             keyBaseAlias = ctx.generator.getOperatorId() + "_" + tableAlias + "_" + column;
 
-            semiJoinAttempted = generateSemiJoinOperatorPlan(
-                ctx, parseContext, ts, keyBaseAlias, sjHint);
-            if (!semiJoinAttempted && sjHint != null) {
-              throw new SemanticException("The user hint to enforce semijoin failed required conditions");
+            Map<String, SemiJoinHint> hints = parseContext.getSemiJoinHints();
+            if (hints != null) {
+              // If hints map has no entry that would imply that user enforced
+              // no runtime filtering.
+              if (hints.size() > 0) {
+                SemiJoinHint sjHint = hints.get(tableAlias);
+                semiJoinAttempted = generateSemiJoinOperatorPlan(
+                        ctx, parseContext, ts, keyBaseAlias, sjHint);
+                if (!semiJoinAttempted && sjHint != null) {
+                  throw new SemanticException("The user hint to enforce semijoin failed required conditions");
+                }
+              }
+            } else {
+              semiJoinAttempted = generateSemiJoinOperatorPlan(
+                      ctx, parseContext, ts, keyBaseAlias, null);
             }
           }
         }

http://git-wip-us.apache.org/repos/asf/hive/blob/5d459665/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/HiveOpConverter.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/HiveOpConverter.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/HiveOpConverter.java
index 40c0f3b..b9b600d 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/HiveOpConverter.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/HiveOpConverter.java
@@ -339,8 +339,6 @@ public class HiveOpConverter {
     // through Hive
     String[] baseSrc = new String[joinRel.getInputs().size()];
     String tabAlias = getHiveDerivedTableAlias();
-    Map<String, SemiJoinHint> semiJoinHints = semanticAnalyzer.parseSemiJoinHint(
-        semanticAnalyzer.getQB().getParseInfo().getHints());
 
     // 1. Convert inputs
     OpAttr[] inputs = new OpAttr[joinRel.getInputs().size()];
@@ -407,7 +405,7 @@ public class HiveOpConverter {
 
     // 6. Generate Join operator
     JoinOperator joinOp = genJoin(joinRel, joinExpressions, filterExpressions, children,
-            baseSrc, tabAlias, semiJoinHints);
+            baseSrc, tabAlias);
 
     // 7. Return result
     return new OpAttr(tabAlias, newVcolsInCalcite, joinOp);
@@ -879,7 +877,7 @@ public class HiveOpConverter {
 
   private static JoinOperator genJoin(RelNode join, ExprNodeDesc[][] joinExpressions,
       List<List<ExprNodeDesc>> filterExpressions, List<Operator<?>> children,
-      String[] baseSrc, String tabAlias, Map<String, SemiJoinHint> semiJoinHints)
+      String[] baseSrc, String tabAlias)
           throws SemanticException {
 
     // 1. Extract join type
@@ -1006,7 +1004,6 @@ public class HiveOpConverter {
     // 4. We create the join operator with its descriptor
     JoinDesc desc = new JoinDesc(exprMap, outputColumnNames, noOuterJoin, joinCondns,
             filters, joinExpressions, 0);
-    desc.setSemiJoinHints(semiJoinHints);
     desc.setReversedExprs(reversedExprs);
     desc.setFilterMap(filterMap);
 

http://git-wip-us.apache.org/repos/asf/hive/blob/5d459665/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java
index 1b054a7..5d640be 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java
@@ -335,7 +335,10 @@ public class CalcitePlanner extends SemanticAnalyzer {
       skipCalcitePlan = true;
     } else {
       PreCboCtx cboCtx = (PreCboCtx) plannerCtx;
-      ASTNode oldHints = getQB().getParseInfo().getHints();
+      List<ASTNode> oldHints = new ArrayList<>();
+      // Cache the hints before CBO runs and removes them.
+      // Use the hints later in top level QB.
+        getHintsFromQB(getQB(), oldHints);
 
       // Note: for now, we don't actually pass the queryForCbo to CBO, because
       // it accepts qb, not AST, and can also access all the private stuff in
@@ -364,6 +367,10 @@ public class CalcitePlanner extends SemanticAnalyzer {
               throw new SemanticException("Create view is not supported in cbo return path.");
             }
             sinkOp = getOptimizedHiveOPDag();
+            if (oldHints.size() > 0) {
+              LOG.debug("Propagating hints to QB: " + oldHints);
+              getQB().getParseInfo().setHintList(oldHints);
+            }
             LOG.info("CBO Succeeded; optimized logical plan.");
             this.ctx.setCboInfo("Plan optimized by CBO.");
             this.ctx.setCboSucceeded(true);
@@ -403,13 +410,13 @@ public class CalcitePlanner extends SemanticAnalyzer {
                 newAST = reAnalyzeCTASAfterCbo(newAST);
               }
             }
-            if (oldHints != null) {
+            if (oldHints.size() > 0) {
               if (getQB().getParseInfo().getHints() != null) {
-                LOG.warn("Hints are not null in the optimized tree; before CBO " + oldHints.dump()
-                    + "; after CBO " + getQB().getParseInfo().getHints().dump());
+                LOG.warn("Hints are not null in the optimized tree; "
+                    + "after CBO " + getQB().getParseInfo().getHints().dump());
               } else {
                 LOG.debug("Propagating hints to QB: " + oldHints);
-                getQB().getParseInfo().setHints(oldHints);
+                getQB().getParseInfo().setHintList(oldHints);
               }
             }
             Phase1Ctx ctx_1 = initPhase1Ctx();

http://git-wip-us.apache.org/repos/asf/hive/blob/5d459665/ql/src/java/org/apache/hadoop/hive/ql/parse/HintParser.g
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/HintParser.g b/ql/src/java/org/apache/hadoop/hive/ql/parse/HintParser.g
index e110fb3..ec054b8 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/HintParser.g
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/HintParser.g
@@ -83,4 +83,5 @@ hintArgName
     :
     Identifier
     | Number
+    | KW_NONE
     ;

http://git-wip-us.apache.org/repos/asf/hive/blob/5d459665/ql/src/java/org/apache/hadoop/hive/ql/parse/ParseContext.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/ParseContext.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/ParseContext.java
index 3a1f821..6de4bcd 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/ParseContext.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/ParseContext.java
@@ -123,6 +123,7 @@ public class ParseContext {
   private Map<ExprNodeDesc, GroupByOperator> colExprToGBMap =
           new HashMap<>();
 
+  private Map<String, SemiJoinHint> semiJoinHints;
   public ParseContext() {
   }
 
@@ -672,4 +673,12 @@ public class ParseContext {
   public Map<ExprNodeDesc, GroupByOperator> getColExprToGBMap() {
     return colExprToGBMap;
   }
+
+  public void setSemiJoinHints(Map<String, SemiJoinHint> hints) {
+    this.semiJoinHints = hints;
+  }
+
+  public Map<String, SemiJoinHint> getSemiJoinHints() {
+    return semiJoinHints;
+  }
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/5d459665/ql/src/java/org/apache/hadoop/hive/ql/parse/ParseUtils.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/ParseUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/ParseUtils.java
index 54e37f7..51aeeed 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/ParseUtils.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/ParseUtils.java
@@ -441,6 +441,7 @@ public final class ParseUtils {
       HashSet<String> aliases = new HashSet<>();
       for (int i = 0; i < select.getChildCount(); ++i) {
         Tree selExpr = select.getChild(i);
+        if (selExpr.getType() == HiveParser.QUERY_HINT) continue;
         assert selExpr.getType() == HiveParser.TOK_SELEXPR;
         assert selExpr.getChildCount() > 0;
         // Examine the last child. It could be an alias.

http://git-wip-us.apache.org/repos/asf/hive/blob/5d459665/ql/src/java/org/apache/hadoop/hive/ql/parse/QBParseInfo.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/QBParseInfo.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/QBParseInfo.java
index 7bf1c59..38df5de 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/QBParseInfo.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/QBParseInfo.java
@@ -44,6 +44,7 @@ public class QBParseInfo {
   private String alias;
   private ASTNode joinExpr;
   private ASTNode hints;
+  private List<ASTNode> hintList;
   private final HashMap<String, ASTNode> aliasToSrc;
   /**
    * insclause-0 -> TOK_TAB ASTNode
@@ -552,6 +553,14 @@ public class QBParseInfo {
     hints = hint;
   }
 
+  public void setHintList(List<ASTNode> hintList) {
+    this.hintList = hintList;
+  }
+
+  public List<ASTNode> getHintList() {
+    return hintList;
+  }
+
   public ASTNode getHints() {
     return hints;
   }

http://git-wip-us.apache.org/repos/asf/hive/blob/5d459665/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
index cbbb7d0..5115fc8 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
@@ -8126,7 +8126,6 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
 
     JoinDesc desc = new JoinDesc(exprMap, outputColumnNames,
         join.getNoOuterJoin(), joinCondns, filterMap, joinKeys, 0);
-    desc.setSemiJoinHints(join.getSemiJoinHint());
     desc.setReversedExprs(reversedExprs);
     desc.setFilterMap(join.getFilterMap());
     // For outer joins, add filters that apply to more than one input
@@ -8673,11 +8672,6 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
       LOG.info("STREAMTABLE hint honored.");
       parseStreamTables(joinTree, qb);
     }
-
-    if (qb.getParseInfo().getHints() != null) {
-      // TODO: do we need this for unique join?
-      joinTree.setSemiJoinHint(parseSemiJoinHint(qb.getParseInfo().getHints()));
-    }
     return joinTree;
   }
 
@@ -8976,8 +8970,6 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
       if ((conf.getVar(HiveConf.ConfVars.HIVE_EXECUTION_ENGINE).equals("tez")) == false) {
         parseStreamTables(joinTree, qb);
       }
-
-      joinTree.setSemiJoinHint(parseSemiJoinHint(qb.getParseInfo().getHints()));
     }
 
     return joinTree;
@@ -9031,46 +9023,65 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
    *  2. TableName, bloom filter entries, and
    *  3. TableName, ColumnName
    *  */
-  public Map<String, SemiJoinHint> parseSemiJoinHint(ASTNode hints) throws SemanticException {
-    if (hints == null) return null;
+  private Map<String, SemiJoinHint> parseSemiJoinHint(List<ASTNode> hints) throws SemanticException {
+    if (hints == null || hints.size() == 0) return null;
     Map<String, SemiJoinHint> result = null;
-    for (Node hintNode : hints.getChildren()) {
-      ASTNode hint = (ASTNode) hintNode;
-      if (hint.getChild(0).getType() != HintParser.TOK_LEFTSEMIJOIN) continue;
-      if (result == null) {
-        result = new HashMap<>();
-      }
-      String alias = null;
-      String colName = null;
-      Tree args = hint.getChild(1);
-      for (int i = 0; i < args.getChildCount(); i++) {
-        // We can have table names, column names or sizes here (or incorrect hint if the user is so inclined).
-        String text = args.getChild(i).getText();
-        Integer number = null;
-        try {
-          number = Integer.parseInt(text);
-        } catch (NumberFormatException ex) { // Ignore.
+    for (ASTNode hintNode : hints) {
+      for (Node node : hintNode.getChildren()) {
+        ASTNode hint = (ASTNode) node;
+        if (hint.getChild(0).getType() != HintParser.TOK_LEFTSEMIJOIN) continue;
+        if (result == null) {
+          result = new HashMap<>();
+        }
+        String alias = null;
+        String colName = null;
+        Tree args = hint.getChild(1);
+        if (args.getChildCount() == 1) {
+          String text = args.getChild(0).getText();
+          if (text.equalsIgnoreCase("None")) {
+            // Hint to disable runtime filtering.
+            return result;
+          }
         }
-        if (number != null) {
-          if (alias == null) {
-            throw new SemanticException("Invalid semijoin hint - arg " + i + " ("
-                + text + ") is a number but the previous one is not an alias");
+        for (int i = 0; i < args.getChildCount(); i++) {
+          // We can have table names, column names or sizes here (or incorrect hint if the user is so inclined).
+          String text = args.getChild(i).getText();
+          Integer number = null;
+          try {
+            number = Integer.parseInt(text);
+          } catch (NumberFormatException ex) { // Ignore.
           }
-          SemiJoinHint sjHint = new SemiJoinHint(alias, colName, number);
-          result.put(alias, sjHint);
-          alias = null;
-          colName = null;
-        } else {
-          if (alias == null) {
-            alias = text;
-          } else if (colName == null ){
-            colName = text;
-          } else {
-            // No bloom filter entries provided.
-            SemiJoinHint sjHint = new SemiJoinHint(alias, colName, null);
+          if (number != null) {
+            if (alias == null) {
+              throw new SemanticException("Invalid semijoin hint - arg " + i + " ("
+                      + text + ") is a number but the previous one is not an alias");
+            }
+            if (result.get(alias) != null) {
+              // A hint with same alias already present, throw
+              throw new SemanticException("A hint with alias " + alias +
+                      " already present. Please use unique aliases");
+            }
+            SemiJoinHint sjHint = new SemiJoinHint(alias, colName, number);
             result.put(alias, sjHint);
-            alias = text;
+            alias = null;
             colName = null;
+          } else {
+            if (alias == null) {
+              alias = text;
+            } else if (colName == null) {
+              colName = text;
+            } else {
+              // No bloom filter entries provided.
+              if (result.get(alias) != null) {
+                // A hint with same alias already present, throw
+                throw new SemanticException("A hint with alias " + alias +
+                        " already present. Please use unique aliases");
+              }
+              SemiJoinHint sjHint = new SemiJoinHint(alias, colName, null);
+              result.put(alias, sjHint);
+              alias = text;
+              colName = null;
+            }
           }
         }
       }
@@ -11184,7 +11195,40 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
       throw new SemanticException(ex);
     }
   }
+
+  public void getHintsFromQB(QB qb, List<ASTNode> hints) {
+    if (qb.getParseInfo().getHints() != null) {
+      hints.add(qb.getParseInfo().getHints());
+    }
+
+    Set<String> aliases = qb.getSubqAliases();
+
+    for (String alias : aliases) {
+      getHintsFromQB(qb.getSubqForAlias(alias), hints);
+    }
+  }
+
+  public void getHintsFromQB(QBExpr qbExpr, List<ASTNode> hints) {
+    QBExpr qbExpr1 = qbExpr.getQBExpr1();
+    QBExpr qbExpr2 = qbExpr.getQBExpr2();
+    QB qb = qbExpr.getQB();
+
+    if (qbExpr1 != null) {
+      getHintsFromQB(qbExpr1, hints);
+    }
+    if (qbExpr2 != null) {
+      getHintsFromQB(qbExpr2, hints);
+    }
+    if (qb != null) {
+      getHintsFromQB(qb, hints);
+    }
+  }
+
   Operator genOPTree(ASTNode ast, PlannerContext plannerCtx) throws SemanticException {
+    // fetch all the hints in qb
+    List<ASTNode> hintsList = new ArrayList<>();
+    getHintsFromQB(qb, hintsList);
+    getQB().getParseInfo().setHintList(hintsList);
     return genPlan(qb);
   }
 
@@ -11243,6 +11287,9 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
         viewAliasToInput, reduceSinkOperatorsAddedByEnforceBucketingSorting,
         analyzeRewrite, tableDesc, createVwDesc, queryProperties, viewProjectToTableSchema, acidFileSinks);
 
+    // Set the semijoin hints in parse context
+    pCtx.setSemiJoinHints(parseSemiJoinHint(getQB().getParseInfo().getHintList()));
+
     // 5. Take care of view creation
     if (createVwDesc != null) {
       if (ctx.getExplainAnalyze() == AnalyzeState.RUNNING) {

http://git-wip-us.apache.org/repos/asf/hive/blob/5d459665/ql/src/java/org/apache/hadoop/hive/ql/parse/TaskCompiler.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/TaskCompiler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/TaskCompiler.java
index 5ea7800..08a8f00 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/TaskCompiler.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/TaskCompiler.java
@@ -533,6 +533,7 @@ public abstract class TaskCompiler {
     clone.setRsToRuntimeValuesInfoMap(pCtx.getRsToRuntimeValuesInfoMap());
     clone.setRsToSemiJoinBranchInfo(pCtx.getRsToSemiJoinBranchInfo());
     clone.setColExprToGBMap(pCtx.getColExprToGBMap());
+    clone.setSemiJoinHints(pCtx.getSemiJoinHints());
 
     return clone;
   }

http://git-wip-us.apache.org/repos/asf/hive/blob/5d459665/ql/src/java/org/apache/hadoop/hive/ql/plan/ExprNodeDynamicListDesc.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/ExprNodeDynamicListDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/ExprNodeDynamicListDesc.java
index 3143554..57e27e6 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/plan/ExprNodeDynamicListDesc.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/ExprNodeDynamicListDesc.java
@@ -21,7 +21,6 @@ package org.apache.hadoop.hive.ql.plan;
 import java.util.Map;
 
 import org.apache.hadoop.hive.ql.exec.Operator;
-import org.apache.hadoop.hive.ql.parse.SemiJoinHint;
 import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
 
 /**
@@ -32,17 +31,15 @@ public class ExprNodeDynamicListDesc extends ExprNodeDesc {
 
   Operator<? extends OperatorDesc> source;
   int keyIndex;
-  Map<String, SemiJoinHint> hints;
 
   public ExprNodeDynamicListDesc() {
   }
 
   public ExprNodeDynamicListDesc(TypeInfo typeInfo, Operator<? extends OperatorDesc> source,
-      int keyIndex, Map<String, SemiJoinHint> hints) {
+      int keyIndex) {
     super(typeInfo);
     this.source = source;
     this.keyIndex = keyIndex;
-    this.hints = hints;
   }
 
   public void setSource(Operator<? extends OperatorDesc> source) {
@@ -63,7 +60,7 @@ public class ExprNodeDynamicListDesc extends ExprNodeDesc {
 
   @Override
   public ExprNodeDesc clone() {
-    return new ExprNodeDynamicListDesc(typeInfo, source, keyIndex, hints);
+    return new ExprNodeDynamicListDesc(typeInfo, source, keyIndex);
   }
 
   @Override
@@ -84,7 +81,4 @@ public class ExprNodeDynamicListDesc extends ExprNodeDesc {
     return source.toString();
   }
 
-  public Map<String, SemiJoinHint> getHints() {
-    return hints;
-  }
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/5d459665/ql/src/java/org/apache/hadoop/hive/ql/plan/JoinDesc.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/JoinDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/JoinDesc.java
index 7d4267d..c4fb3f3 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/plan/JoinDesc.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/JoinDesc.java
@@ -29,7 +29,6 @@ import java.util.Map;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hive.ql.exec.Operator;
 import org.apache.hadoop.hive.ql.parse.QBJoinTree;
-import org.apache.hadoop.hive.ql.parse.SemiJoinHint;
 import org.apache.hadoop.hive.ql.plan.Explain.Level;
 
 
@@ -107,10 +106,6 @@ public class JoinDesc extends AbstractOperatorDesc {
   private transient Map<String, Operator<? extends OperatorDesc>> aliasToOpInfo;
   private transient boolean leftInputJoin;
   private transient List<String> streamAliases;
-  // Note: there are two things in Hive called semi-joins - the left semi join construct,
-  //       and also a bloom-filter based optimization that came later. This is for the latter.
-  //       Everything else in this desc that says "semi-join" is for the former.
-  private transient Map<String, SemiJoinHint> semiJoinHints;
 
   // non-transient field, used at runtime to kill a task if it exceeded memory limits when running in LLAP
   protected long noConditionalTaskSize;
@@ -206,7 +201,6 @@ public class JoinDesc extends AbstractOperatorDesc {
     this.filterMap = clone.filterMap;
     this.residualFilterExprs = clone.residualFilterExprs;
     this.statistics = clone.statistics;
-    this.semiJoinHints = clone.semiJoinHints;
     this.noConditionalTaskSize = clone.noConditionalTaskSize;
   }
 
@@ -694,17 +688,6 @@ public class JoinDesc extends AbstractOperatorDesc {
   }
 
   private static final org.slf4j.Logger LOG = org.slf4j.LoggerFactory.getLogger(JoinDesc.class);
-  public void setSemiJoinHints(Map<String, SemiJoinHint> semiJoinHints) {
-    if (semiJoinHints != null || this.semiJoinHints != null) {
-      LOG.debug("Setting semi-join hints to " + semiJoinHints);
-    }
-    this.semiJoinHints = semiJoinHints;
-  }
-
-  public Map<String, SemiJoinHint> getSemiJoinHints() {
-    return semiJoinHints;
-  }
-
 
   public long getNoConditionalTaskSize() {
     return noConditionalTaskSize;

http://git-wip-us.apache.org/repos/asf/hive/blob/5d459665/ql/src/java/org/apache/hadoop/hive/ql/ppd/SyntheticJoinPredicate.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ppd/SyntheticJoinPredicate.java b/ql/src/java/org/apache/hadoop/hive/ql/ppd/SyntheticJoinPredicate.java
index f45daa8..64baa6a 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/ppd/SyntheticJoinPredicate.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/ppd/SyntheticJoinPredicate.java
@@ -26,7 +26,6 @@ import java.util.Map;
 import java.util.Set;
 import java.util.Stack;
 
-import org.apache.hadoop.hive.ql.parse.SemiJoinHint;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
@@ -140,7 +139,6 @@ public class SyntheticJoinPredicate extends Transform {
 
       ReduceSinkOperator source = (ReduceSinkOperator) stack.get(stack.size() - 2);
       int srcPos = join.getParentOperators().indexOf(source);
-      Map<String, SemiJoinHint> hints = join.getConf().getSemiJoinHints();
 
       List<Operator<? extends OperatorDesc>> parents = join.getParentOperators();
 
@@ -181,7 +179,7 @@ public class SyntheticJoinPredicate extends Transform {
           inArgs.add(sourceKeys.get(i));
 
           ExprNodeDynamicListDesc dynamicExpr =
-              new ExprNodeDynamicListDesc(targetKeys.get(i).getTypeInfo(), target, i, hints);
+              new ExprNodeDynamicListDesc(targetKeys.get(i).getTypeInfo(), target, i);
 
           inArgs.add(dynamicExpr);
 

http://git-wip-us.apache.org/repos/asf/hive/blob/5d459665/ql/src/test/queries/clientpositive/semijoin_hint.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/semijoin_hint.q b/ql/src/test/queries/clientpositive/semijoin_hint.q
index 5de0c8c..a3cd1d6 100644
--- a/ql/src/test/queries/clientpositive/semijoin_hint.q
+++ b/ql/src/test/queries/clientpositive/semijoin_hint.q
@@ -35,20 +35,55 @@ analyze table alltypesorc_int compute statistics for columns;
 analyze table srcpart_date compute statistics for columns;
 analyze table srcpart_small compute statistics for columns;
 
+create table srccc as select * from src;
+
 set hive.cbo.returnpath.hiveop=true;
 
-create table srccc as select * from src;
+-- disabling this test case for returnpath true as the aliases in case of union are mangled due to which hints are not exercised.
+--explain select /*+ semi(k, str, 5000)*/ count(*) from srcpart_date k join srcpart_small s on (k.str = s.key1)
+--        union all
+--        select /*+ semi(v, 5000)*/ count(*) from srcpart_date d join srcpart_small v on (d.str = v.key1);
+
+-- Query which creates semijoin
+explain select count(*) from srcpart_date k join srcpart_small v on (k.str = v.key1);
+-- Skip semijoin by using keyword "None" as argument
+explain select /*+ semi(None)*/ count(*) from srcpart_date k join srcpart_small v on (k.str = v.key1);
 
-EXPLAIN select  /*+ semi(k, str, 5000)*/ count(*) from srcpart_date k join srcpart_small v on (k.str = v.key1) join alltypesorc_int i on (k.value = i.cstring);
+EXPLAIN select  /*+ semi(srcpart_date, str, 5000)*/ count(*) from srcpart_date join srcpart_small v on (srcpart_date.str = v.key1) join alltypesorc_int i on (srcpart_date.value = i.cstring);
 EXPLAIN select  /*+ semi(i, 3000)*/ count(*) from srcpart_date k join srcpart_small v on (k.str = v.key1) join alltypesorc_int i on (v.key1 = i.cstring);
 
-explain select /*+ semi(k, str, 1000)*/ count(*) from srcpart_date k join srcpart_small v on (k.str = v.key1);
+explain select /*+ semi(k, str, 5000)*/ count(*) from srcpart_date k join srcpart_small v on (k.str = v.key1);
 
 set hive.cbo.returnpath.hiveop=false;
 
-explain select /*+ semi(k, 1000)*/ count(*) from srcpart_date k join srcpart_small v on (k.str = v.key1);
+explain select /*+ semi(k, str, 5000)*/ count(*) from srcpart_date k join srcpart_small s on (k.str = s.key1)
+        union all
+        select /*+ semi(v, 5000)*/ count(*) from srcpart_date d join srcpart_small v on (d.str = v.key1);
+
+-- Query which creates semijoin
+explain select count(*) from srcpart_date k join srcpart_small v on (k.str = v.key1);
+-- Skip semijoin by using keyword "None" as argument
+explain select /*+ semi(None)*/ count(*) from srcpart_date k join srcpart_small v on (k.str = v.key1);
+
+EXPLAIN select  /*+ semi(srcpart_date, str, 5000)*/ count(*) from srcpart_date join srcpart_small v on (srcpart_date.str = v.key1) join alltypesorc_int i on (srcpart_date.value = i.cstring);
+EXPLAIN select  /*+ semi(i, 3000)*/ count(*) from srcpart_date k join srcpart_small v on (k.str = v.key1) join alltypesorc_int i on (v.key1 = i.cstring);
+
+explain select /*+ semi(k, str, 5000)*/ count(*) from srcpart_date k join srcpart_small v on (k.str = v.key1);
 
 set hive.cbo.enable=false;
 
-explain select /*+ semi(k, str, 1000)*/ count(*) from srcpart_date k join srcpart_small v on (k.str = v.key1);
+explain select /*+ semi(k, str, 5000)*/ count(*) from srcpart_date k join srcpart_small s on (k.str = s.key1)
+        union all
+        select /*+ semi(v, 5000)*/ count(*) from srcpart_date d join srcpart_small v on (d.str = v.key1);
+
+-- Query which creates semijoin
+explain select count(*) from srcpart_date k join srcpart_small v on (k.str = v.key1);
+-- Skip semijoin by using keyword "None" as argument
+explain select /*+ semi(None)*/ count(*) from srcpart_date k join srcpart_small v on (k.str = v.key1);
+
+EXPLAIN select  /*+ semi(srcpart_date, str, 5000)*/ count(*) from srcpart_date join srcpart_small v on (srcpart_date.str = v.key1) join alltypesorc_int i on (srcpart_date.value = i.cstring);
+EXPLAIN select  /*+ semi(i, 3000)*/ count(*) from srcpart_date k join srcpart_small v on (k.str = v.key1) join alltypesorc_int i on (v.key1 = i.cstring);
+
+explain select /*+ semi(k, str, 5000)*/ count(*) from srcpart_date k join srcpart_small v on (k.str = v.key1);
+
 


[15/50] [abbrv] hive git commit: HIVE-16389: Allow HookContext to access SQLOperationDisplay (Sahil Takiar, reviewed by Sergio Pena)

Posted by we...@apache.org.
HIVE-16389: Allow HookContext to access SQLOperationDisplay (Sahil Takiar, reviewed by Sergio Pena)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/1fecb81f
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/1fecb81f
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/1fecb81f

Branch: refs/heads/hive-14535
Commit: 1fecb81f942ffa1ce91a2ea343f266ce82f6e7ef
Parents: 44804d8
Author: Sahil Takiar <ta...@gmail.com>
Authored: Fri May 5 09:46:09 2017 -0500
Committer: Sergio Pena <se...@cloudera.com>
Committed: Fri May 5 09:46:09 2017 -0500

----------------------------------------------------------------------
 .../service/cli/session/TestQueryDisplay.java   |  40 +++----
 .../java/org/apache/hadoop/hive/ql/Driver.java  |  16 ++-
 .../org/apache/hadoop/hive/ql/QueryInfo.java    | 102 ++++++++++++++++
 .../hadoop/hive/ql/hooks/HookContext.java       |  10 +-
 .../org/apache/hive/tmpl/QueryProfileTmpl.jamon |  68 +++++------
 .../service/cli/operation/OperationManager.java |  47 ++++----
 .../service/cli/operation/QueryInfoCache.java   |  41 +++++++
 .../service/cli/operation/SQLOperation.java     |  33 +++---
 .../cli/operation/SQLOperationDisplay.java      | 108 -----------------
 .../cli/operation/SQLOperationDisplayCache.java |  39 -------
 .../service/servlet/QueryProfileServlet.java    |   8 +-
 .../hive-webapps/hiveserver2/hiveserver2.jsp    |  10 +-
 .../TestQueryLifeTimeHooksWithSQLOperation.java | 115 +++++++++++++++++++
 13 files changed, 380 insertions(+), 257 deletions(-)
----------------------------------------------------------------------
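As a usage sketch only (the hook class below is hypothetical and assumes the existing ExecuteWithHookContext interface; getQueryInfo() and the QueryInfo getters are the ones added in this patch), a post-execution hook can now read the running query's information straight from the HookContext:

    import org.apache.hadoop.hive.ql.QueryInfo;
    import org.apache.hadoop.hive.ql.hooks.ExecuteWithHookContext;
    import org.apache.hadoop.hive.ql.hooks.HookContext;

    public class QueryInfoLoggingHook implements ExecuteWithHookContext {
      @Override
      public void run(HookContext hookContext) throws Exception {
        QueryInfo queryInfo = hookContext.getQueryInfo();  // accessor added by this patch
        if (queryInfo != null) {  // null when the Driver was built without a QueryInfo
          System.out.println("user=" + queryInfo.getUserName()
              + " engine=" + queryInfo.getExecutionEngine()
              + " state=" + queryInfo.getState()
              + " elapsedMs=" + queryInfo.getElapsedTime());
        }
      }
    }
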


http://git-wip-us.apache.org/repos/asf/hive/blob/1fecb81f/itests/hive-unit/src/test/java/org/apache/hive/service/cli/session/TestQueryDisplay.java
----------------------------------------------------------------------
diff --git a/itests/hive-unit/src/test/java/org/apache/hive/service/cli/session/TestQueryDisplay.java b/itests/hive-unit/src/test/java/org/apache/hive/service/cli/session/TestQueryDisplay.java
index 6c60125..155c65d 100644
--- a/itests/hive-unit/src/test/java/org/apache/hive/service/cli/session/TestQueryDisplay.java
+++ b/itests/hive-unit/src/test/java/org/apache/hive/service/cli/session/TestQueryDisplay.java
@@ -19,11 +19,11 @@ package org.apache.hive.service.cli.session;
 
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.ql.QueryDisplay;
+import org.apache.hadoop.hive.ql.QueryInfo;
 import org.apache.hadoop.hive.ql.plan.api.StageType;
 import org.apache.hadoop.hive.ql.session.SessionState;
 import org.apache.hive.service.cli.OperationHandle;
 import org.apache.hive.service.cli.SessionHandle;
-import org.apache.hive.service.cli.operation.SQLOperationDisplay;
 import org.apache.hive.service.rpc.thrift.TProtocolVersion;
 import org.apache.hive.service.server.HiveServer2;
 import org.apache.hive.tmpl.QueryProfileTmpl;
@@ -69,25 +69,25 @@ public class TestQueryDisplay {
     OperationHandle opHandle2 = session.executeStatement("show tables", null);
 
 
-    List<SQLOperationDisplay> liveSqlOperations, historicSqlOperations;
-    liveSqlOperations = sessionManager.getOperationManager().getLiveSqlOperations();
-    historicSqlOperations = sessionManager.getOperationManager().getHistoricalSQLOperations();
+    List<QueryInfo> liveSqlOperations, historicSqlOperations;
+    liveSqlOperations = sessionManager.getOperationManager().getLiveQueryInfos();
+    historicSqlOperations = sessionManager.getOperationManager().getHistoricalQueryInfos();
     Assert.assertEquals(liveSqlOperations.size(), 2);
     Assert.assertEquals(historicSqlOperations.size(), 0);
     verifyDDL(liveSqlOperations.get(0), "show databases", opHandle1.getHandleIdentifier().toString(), false);
     verifyDDL(liveSqlOperations.get(1),"show tables", opHandle2.getHandleIdentifier().toString(), false);
 
     session.closeOperation(opHandle1);
-    liveSqlOperations = sessionManager.getOperationManager().getLiveSqlOperations();
-    historicSqlOperations = sessionManager.getOperationManager().getHistoricalSQLOperations();
+    liveSqlOperations = sessionManager.getOperationManager().getLiveQueryInfos();
+    historicSqlOperations = sessionManager.getOperationManager().getHistoricalQueryInfos();
     Assert.assertEquals(liveSqlOperations.size(), 1);
     Assert.assertEquals(historicSqlOperations.size(), 1);
     verifyDDL(historicSqlOperations.get(0),"show databases", opHandle1.getHandleIdentifier().toString(), true);
     verifyDDL(liveSqlOperations.get(0),"show tables", opHandle2.getHandleIdentifier().toString(), false);
 
     session.closeOperation(opHandle2);
-    liveSqlOperations = sessionManager.getOperationManager().getLiveSqlOperations();
-    historicSqlOperations = sessionManager.getOperationManager().getHistoricalSQLOperations();
+    liveSqlOperations = sessionManager.getOperationManager().getLiveQueryInfos();
+    historicSqlOperations = sessionManager.getOperationManager().getHistoricalQueryInfos();
     Assert.assertEquals(liveSqlOperations.size(), 0);
     Assert.assertEquals(historicSqlOperations.size(), 2);
     verifyDDL(historicSqlOperations.get(1),"show databases", opHandle1.getHandleIdentifier().toString(), true);
@@ -123,23 +123,23 @@ public class TestQueryDisplay {
     session.close();
   }
 
-  private void verifyDDL(SQLOperationDisplay display, String stmt, String handle, boolean finished) {
+  private void verifyDDL(QueryInfo queryInfo, String stmt, String handle, boolean finished) {
 
-    Assert.assertEquals(display.getUserName(), "testuser");
-    Assert.assertEquals(display.getExecutionEngine(), "mr");
-    Assert.assertEquals(display.getOperationId(), handle);
-    Assert.assertTrue(display.getBeginTime() > 0 && display.getBeginTime() <= System.currentTimeMillis());
+    Assert.assertEquals(queryInfo.getUserName(), "testuser");
+    Assert.assertEquals(queryInfo.getExecutionEngine(), "mr");
+    Assert.assertEquals(queryInfo.getOperationId(), handle);
+    Assert.assertTrue(queryInfo.getBeginTime() > 0 && queryInfo.getBeginTime() <= System.currentTimeMillis());
 
     if (finished) {
-      Assert.assertTrue(display.getEndTime() > 0 && display.getEndTime() >= display.getBeginTime()
-        && display.getEndTime() <= System.currentTimeMillis());
-      Assert.assertTrue(display.getRuntime() > 0);
+      Assert.assertTrue(queryInfo.getEndTime() > 0 && queryInfo.getEndTime() >= queryInfo.getBeginTime()
+        && queryInfo.getEndTime() <= System.currentTimeMillis());
+      Assert.assertTrue(queryInfo.getRuntime() > 0);
     } else {
-      Assert.assertNull(display.getEndTime());
+      Assert.assertNull(queryInfo.getEndTime());
       //For runtime, query may have finished.
     }
 
-    QueryDisplay qDisplay1 = display.getQueryDisplay();
+    QueryDisplay qDisplay1 = queryInfo.getQueryDisplay();
     Assert.assertNotNull(qDisplay1);
     Assert.assertEquals(qDisplay1.getQueryString(), stmt);
     Assert.assertNotNull(qDisplay1.getExplainPlan());
@@ -170,9 +170,9 @@ public class TestQueryDisplay {
    */
   private void verifyDDLHtml(String stmt, String opHandle) throws Exception {
     StringWriter sw = new StringWriter();
-    SQLOperationDisplay sod = sessionManager.getOperationManager().getSQLOperationDisplay(
+    QueryInfo queryInfo = sessionManager.getOperationManager().getQueryInfo(
       opHandle);
-    new QueryProfileTmpl().render(sw, sod);
+    new QueryProfileTmpl().render(sw, queryInfo);
     String html = sw.toString();
 
     Assert.assertTrue(html.contains(stmt));

http://git-wip-us.apache.org/repos/asf/hive/blob/1fecb81f/ql/src/java/org/apache/hadoop/hive/ql/Driver.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/Driver.java b/ql/src/java/org/apache/hadoop/hive/ql/Driver.java
index d32f313..29cce9a 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/Driver.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/Driver.java
@@ -138,6 +138,7 @@ public class Driver implements CommandProcessor {
   private static final Logger LOG = LoggerFactory.getLogger(CLASS_NAME);
   static final private LogHelper console = new LogHelper(LOG);
   static final int SHUTDOWN_HOOK_PRIORITY = 0;
+  private final QueryInfo queryInfo;
   private Runnable shutdownRunner = null;
 
   private int maxRows = 100;
@@ -360,18 +361,22 @@ public class Driver implements CommandProcessor {
   }
 
   public Driver(HiveConf conf, String userName) {
-    this(new QueryState(conf), userName);
+    this(new QueryState(conf), userName, null);
   }
 
   public Driver(QueryState queryState, String userName) {
-    this(queryState, userName, new HooksLoader(queryState.getConf()));
+    this(queryState, userName, new HooksLoader(queryState.getConf()), null);
   }
 
   public Driver(HiveConf conf, HooksLoader hooksLoader) {
-    this(new QueryState(conf), null, hooksLoader);
+    this(new QueryState(conf), null, hooksLoader, null);
   }
 
-  private Driver(QueryState queryState, String userName, HooksLoader hooksLoader) {
+  public Driver(QueryState queryState, String userName, QueryInfo queryInfo) {
+     this(queryState, userName, new HooksLoader(queryState.getConf()), queryInfo);
+  }
+
+  public Driver(QueryState queryState, String userName, HooksLoader hooksLoader, QueryInfo queryInfo) {
     this.queryState = queryState;
     this.conf = queryState.getConf();
     isParallelEnabled = (conf != null)
@@ -379,6 +384,7 @@ public class Driver implements CommandProcessor {
     this.userName = userName;
     this.hooksLoader = hooksLoader;
     this.queryLifeTimeHookRunner = new QueryLifeTimeHookRunner(conf, hooksLoader, console);
+    this.queryInfo = queryInfo;
   }
 
   /**
@@ -1736,7 +1742,7 @@ public class Driver implements CommandProcessor {
 
       hookContext = new HookContext(plan, queryState, ctx.getPathToCS(), ss.getUserFromAuthenticator(),
           ss.getUserIpAddress(), InetAddress.getLocalHost().getHostAddress(), operationId,
-          ss.getSessionId(), Thread.currentThread().getName(), ss.isHiveServerQuery(), perfLogger);
+          ss.getSessionId(), Thread.currentThread().getName(), ss.isHiveServerQuery(), perfLogger, queryInfo);
       hookContext.setHookType(HookContext.HookType.PRE_EXEC_HOOK);
 
       for (Hook peh : hooksLoader.getHooks(HiveConf.ConfVars.PREEXECHOOKS, console)) {

http://git-wip-us.apache.org/repos/asf/hive/blob/1fecb81f/ql/src/java/org/apache/hadoop/hive/ql/QueryInfo.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/QueryInfo.java b/ql/src/java/org/apache/hadoop/hive/ql/QueryInfo.java
new file mode 100644
index 0000000..adb72a7
--- /dev/null
+++ b/ql/src/java/org/apache/hadoop/hive/ql/QueryInfo.java
@@ -0,0 +1,102 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.ql;
+
+/**
+ * The class is synchronized, as WebUI may access information about a running query.
+ */
+public class QueryInfo {
+
+  private final String userName;
+  private final String executionEngine;
+  private final long beginTime;
+  private final String operationId;
+  private Long runtime;  // tracks only running portion of the query.
+
+  private Long endTime;
+  private String state;
+  private QueryDisplay queryDisplay;
+
+  public QueryInfo(String state, String userName, String executionEngine, String operationId) {
+    this.state = state;
+    this.userName = userName;
+    this.executionEngine = executionEngine;
+    this.beginTime = System.currentTimeMillis();
+    this.operationId = operationId;
+  }
+
+  public synchronized long getElapsedTime() {
+    if (isRunning()) {
+      return System.currentTimeMillis() - beginTime;
+    } else {
+      return endTime - beginTime;
+    }
+  }
+
+  public synchronized boolean isRunning() {
+    return endTime == null;
+  }
+
+  public synchronized QueryDisplay getQueryDisplay() {
+    return queryDisplay;
+  }
+
+  public synchronized void setQueryDisplay(QueryDisplay queryDisplay) {
+    this.queryDisplay = queryDisplay;
+  }
+
+  public String getUserName() {
+    return userName;
+  }
+
+  public String getExecutionEngine() {
+    return executionEngine;
+  }
+
+  public synchronized String getState() {
+    return state;
+  }
+
+  public long getBeginTime() {
+    return beginTime;
+  }
+
+  public synchronized Long getEndTime() {
+    return endTime;
+  }
+
+  public synchronized void updateState(String state) {
+    this.state = state;
+  }
+
+  public String getOperationId() {
+    return operationId;
+  }
+
+  public synchronized void setEndTime() {
+    this.endTime = System.currentTimeMillis();
+  }
+
+  public synchronized void setRuntime(long runtime) {
+    this.runtime = runtime;
+  }
+
+  public synchronized Long getRuntime() {
+    return runtime;
+  }
+}
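
For orientation, a minimal lifecycle sketch of the new class (the wrapper class, state strings and literal values are illustrative; the constructor and setters match the code above):

    import org.apache.hadoop.hive.ql.QueryDisplay;
    import org.apache.hadoop.hive.ql.QueryInfo;

    public class QueryInfoLifecycleSketch {
      static QueryInfo illustrate(QueryDisplay display) {
        // In HiveServer2 these values come from the session and the operation handle.
        QueryInfo info = new QueryInfo("INITIALIZED", "testuser", "mr", "operation-id");
        info.setQueryDisplay(display);  // shared with the running Driver
        info.updateState("RUNNING");
        // ... query executes ...
        info.setRuntime(1234L);  // tracks only the running portion of the query
        info.setEndTime();       // after this, isRunning() returns false
        info.updateState("FINISHED");
        return info;
      }
    }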

http://git-wip-us.apache.org/repos/asf/hive/blob/1fecb81f/ql/src/java/org/apache/hadoop/hive/ql/hooks/HookContext.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/hooks/HookContext.java b/ql/src/java/org/apache/hadoop/hive/ql/hooks/HookContext.java
index 359c238..97ad3c7 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/hooks/HookContext.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/hooks/HookContext.java
@@ -26,9 +26,11 @@ import java.util.Set;
 
 import org.apache.hadoop.fs.ContentSummary;
 import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.ql.QueryInfo;
 import org.apache.hadoop.hive.ql.QueryPlan;
 import org.apache.hadoop.hive.ql.QueryState;
 import org.apache.hadoop.hive.ql.exec.TaskRunner;
+import org.apache.hadoop.hive.ql.history.HiveHistory;
 import org.apache.hadoop.hive.ql.log.PerfLogger;
 import org.apache.hadoop.hive.ql.optimizer.lineage.LineageCtx.Index;
 import org.apache.hadoop.hive.ql.session.SessionState;
@@ -68,11 +70,12 @@ public class HookContext {
   private final String threadId;
   private final boolean isHiveServerQuery;
   private final PerfLogger perfLogger;
+  private final QueryInfo queryInfo;
 
   public HookContext(QueryPlan queryPlan, QueryState queryState,
       Map<String, ContentSummary> inputPathToContentSummary, String userName, String ipAddress,
       String hiveInstanceAddress, String operationId, String sessionId, String threadId,
-      boolean isHiveServerQuery, PerfLogger perfLogger) throws Exception {
+      boolean isHiveServerQuery, PerfLogger perfLogger, QueryInfo queryInfo) throws Exception {
     this.queryPlan = queryPlan;
     this.queryState = queryState;
     this.conf = queryState.getConf();
@@ -95,6 +98,7 @@ public class HookContext {
     this.threadId = threadId;
     this.isHiveServerQuery = isHiveServerQuery;
     this.perfLogger = perfLogger;
+    this.queryInfo = queryInfo;
   }
 
   public QueryPlan getQueryPlan() {
@@ -232,4 +236,8 @@ public class HookContext {
   public PerfLogger getPerfLogger() {
     return perfLogger;
   }
+
+  public QueryInfo getQueryInfo() {
+    return queryInfo;
+  }
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/1fecb81f/service/src/jamon/org/apache/hive/tmpl/QueryProfileTmpl.jamon
----------------------------------------------------------------------
diff --git a/service/src/jamon/org/apache/hive/tmpl/QueryProfileTmpl.jamon b/service/src/jamon/org/apache/hive/tmpl/QueryProfileTmpl.jamon
index fa69eb2..5e2d68c 100644
--- a/service/src/jamon/org/apache/hive/tmpl/QueryProfileTmpl.jamon
+++ b/service/src/jamon/org/apache/hive/tmpl/QueryProfileTmpl.jamon
@@ -17,12 +17,12 @@ See the License for the specific language governing permissions and
 limitations under the License.
 </%doc>
 <%args>
-SQLOperationDisplay sod;
+QueryInfo queryInfo;
 </%args>
 <%import>
 java.util.*;
 org.apache.hadoop.hive.ql.QueryDisplay;
-org.apache.hive.service.cli.operation.SQLOperationDisplay;
+org.apache.hadoop.hive.ql.QueryInfo;
 </%import>
 <!--[if IE]>
 <!DOCTYPE html>
@@ -65,7 +65,7 @@ org.apache.hive.service.cli.operation.SQLOperationDisplay;
     </div>
 
 
-    <%if sod == null %>
+    <%if queryInfo == null %>
          <div class="jumbotron">
            <p>Query not found.  It may have been deleted, increase <i>hive.server2.webui.max.historic.queries</i>
               to retain more historic query information.</p>
@@ -76,7 +76,7 @@ org.apache.hive.service.cli.operation.SQLOperationDisplay;
     <div class="container">
       <div class="row inner_header">
         <div class="page-header">
-          <h1>Query Information: <% sod.getQueryDisplay() == null ? "Unknown" : sod.getQueryDisplay().getQueryString() %></h1>
+          <h1>Query Information: <% queryInfo.getQueryDisplay() == null ? "Unknown" : queryInfo.getQueryDisplay().getQueryString() %></h1>
         </div>
       </div>
       <div class="row">
@@ -91,16 +91,16 @@ org.apache.hive.service.cli.operation.SQLOperationDisplay;
           </ul>
           <div class="tab-content" style="padding-bottom: 9px; border-bottom: 1px solid #ddd;">
               <div class="tab-pane active" id="tab_baseProfile">
-                  <& baseProfile; sod = sod &>
+                  <& baseProfile; queryInfo = queryInfo &>
               </div>
               <div class="tab-pane" id="tab_stages">
-                  <& stages; sod = sod &>
+                  <& stages; queryInfo = queryInfo &>
               </div>
               <div class="tab-pane" id="tab_queryPlan">
-                  <& queryPlan; sod = sod &>
+                  <& queryPlan; queryInfo = queryInfo &>
               </div>
               <div class="tab-pane" id="tab_perfLogging">
-                  <& perfLogging; sod = sod &>
+                  <& perfLogging; queryInfo = queryInfo &>
               </div>
           </div>
       </div>
@@ -117,57 +117,57 @@ org.apache.hive.service.cli.operation.SQLOperationDisplay;
 
 <%def baseProfile>
 <%args>
-    SQLOperationDisplay sod;
+    QueryInfo queryInfo;
 </%args>
     <table class="table table-striped">
         <tr>
             <td>User Name</td>
-            <td><% sod.getUserName() %></td>
+            <td><% queryInfo.getUserName() %></td>
         </tr>
         <tr>
             <td>Query String</td>
-            <td><% sod.getQueryDisplay() == null ? "Unknown" : sod.getQueryDisplay().getQueryString() %></td>
+            <td><% queryInfo.getQueryDisplay() == null ? "Unknown" : queryInfo.getQueryDisplay().getQueryString() %></td>
         </tr>
         <tr>
             <td>Id</td>
-            <td><% sod.getQueryDisplay() == null ? "Unknown" : sod.getQueryDisplay().getQueryId() %></td>
+            <td><% queryInfo.getQueryDisplay() == null ? "Unknown" : queryInfo.getQueryDisplay().getQueryId() %></td>
         </tr>
         <tr>
             <td>Execution Engine</td>
-            <td><% sod.getExecutionEngine() %>
+            <td><% queryInfo.getExecutionEngine() %>
         </tr>
         <tr>
             <td>State</td>
-            <td><% sod.getState() %></td>
+            <td><% queryInfo.getState() %></td>
         </tr>
         <tr>
             <td>Opened Timestamp</td>
-            <td><% new Date(sod.getBeginTime()) %></td>
+            <td><% new Date(queryInfo.getBeginTime()) %></td>
         </tr>
         <tr>
             <td>Opened (s)</td>
-            <td><% sod.getElapsedTime()/1000 %></td>
+            <td><% queryInfo.getElapsedTime()/1000 %></td>
         </tr>
         <tr>
             <td>Closed Timestamp</td>
-            <td><% sod.getEndTime() == null ? "Open" : new Date(sod.getEndTime()) %></td>
+            <td><% queryInfo.getEndTime() == null ? "Open" : new Date(queryInfo.getEndTime()) %></td>
         </tr>
-        <%if sod.getQueryDisplay() != null && sod.getQueryDisplay().getErrorMessage() != null %>
+        <%if queryInfo.getQueryDisplay() != null && queryInfo.getQueryDisplay().getErrorMessage() != null %>
             <tr>
                 <td>Error</td>
-                <td><% sod.getQueryDisplay().getErrorMessage() %></td>
+                <td><% queryInfo.getQueryDisplay().getErrorMessage() %></td>
             </tr>
         </%if>
         <tr>
             <td>Latency (s)</td>
-            <td><% sod.getRuntime() == null ? "Not finished" : sod.getRuntime()/1000 %></td>
+            <td><% queryInfo.getRuntime() == null ? "Not finished" : queryInfo.getRuntime()/1000 %></td>
         </tr>
     </table>
 </%def>
 
 <%def stages>
 <%args>
-    SQLOperationDisplay sod;
+    QueryInfo queryInfo;
 </%args>
    <table class="table table-striped">
        <tr>
@@ -180,8 +180,8 @@ org.apache.hive.service.cli.operation.SQLOperationDisplay;
            <th>Retry If Fail</th>
         </tr>
 
-       <%if sod.getQueryDisplay() != null && sod.getQueryDisplay().getTaskDisplays() != null %>
-           <%for QueryDisplay.TaskDisplay taskDisplay : sod.getQueryDisplay().getTaskDisplays() %>
+       <%if queryInfo.getQueryDisplay() != null && queryInfo.getQueryDisplay().getTaskDisplays() != null %>
+           <%for QueryDisplay.TaskDisplay taskDisplay : queryInfo.getQueryDisplay().getTaskDisplays() %>
                <tr>
                    <td><% taskDisplay.getTaskId() + ":" + taskDisplay.getTaskType() %></td>
                    <td><% taskDisplay.getStatus() %></td>
@@ -199,13 +199,13 @@ org.apache.hive.service.cli.operation.SQLOperationDisplay;
 
 <%def queryPlan>
 <%args>
-    SQLOperationDisplay sod;
+    QueryInfo queryInfo;
 </%args>
     <div class="panel panel-default">
       <div class="panel-heading">Explain plan</div>
       <div class="panel-body">
         <pre>
-        <% sod.getQueryDisplay() == null ? "Unknown" : sod.getQueryDisplay().getExplainPlan() %>
+        <% queryInfo.getQueryDisplay() == null ? "Unknown" : queryInfo.getQueryDisplay().getExplainPlan() %>
         </pre>
       </div>
     </div>
@@ -214,7 +214,7 @@ org.apache.hive.service.cli.operation.SQLOperationDisplay;
 
 <%def perfLogging>
 <%args>
-    SQLOperationDisplay sod;
+    QueryInfo queryInfo;
 </%args>
     <section>
       <h3>Compile-time metadata operations</h3>
@@ -224,8 +224,8 @@ org.apache.hive.service.cli.operation.SQLOperationDisplay;
              <th>Time (ms)</th>
           </tr>
 
-          <%if sod.getQueryDisplay() != null && sod.getQueryDisplay().getHmsTimings(QueryDisplay.Phase.COMPILATION) != null %>
-             <%for Map.Entry<String, Long> time : sod.getQueryDisplay().getHmsTimings(QueryDisplay.Phase.COMPILATION).entrySet() %>
+          <%if queryInfo.getQueryDisplay() != null && queryInfo.getQueryDisplay().getHmsTimings(QueryDisplay.Phase.COMPILATION) != null %>
+             <%for Map.Entry<String, Long> time : queryInfo.getQueryDisplay().getHmsTimings(QueryDisplay.Phase.COMPILATION).entrySet() %>
                  <tr>
                      <td><% time.getKey() %></td>
                      <td><% time.getValue() %></td>
@@ -243,8 +243,8 @@ org.apache.hive.service.cli.operation.SQLOperationDisplay;
              <th>Time (ms)</th>
           </tr>
 
-          <%if sod.getQueryDisplay() != null && sod.getQueryDisplay().getHmsTimings(QueryDisplay.Phase.EXECUTION) != null %>
-             <%for Map.Entry<String, Long> time : sod.getQueryDisplay().getHmsTimings(QueryDisplay.Phase.EXECUTION).entrySet() %>
+          <%if queryInfo.getQueryDisplay() != null && queryInfo.getQueryDisplay().getHmsTimings(QueryDisplay.Phase.EXECUTION) != null %>
+             <%for Map.Entry<String, Long> time : queryInfo.getQueryDisplay().getHmsTimings(QueryDisplay.Phase.EXECUTION).entrySet() %>
                  <tr>
                      <td><% time.getKey() %></td>
                      <td><% time.getValue() %></td>
@@ -262,8 +262,8 @@ org.apache.hive.service.cli.operation.SQLOperationDisplay;
              <th>Time (ms)</th>
           </tr>
 
-          <%if sod.getQueryDisplay() != null && sod.getQueryDisplay().getPerfLogTimes(QueryDisplay.Phase.COMPILATION) != null %>
-             <%for Map.Entry<String, Long> time : sod.getQueryDisplay().getPerfLogTimes(QueryDisplay.Phase.COMPILATION).entrySet()  %>
+          <%if queryInfo.getQueryDisplay() != null && queryInfo.getQueryDisplay().getPerfLogTimes(QueryDisplay.Phase.COMPILATION) != null %>
+             <%for Map.Entry<String, Long> time : queryInfo.getQueryDisplay().getPerfLogTimes(QueryDisplay.Phase.COMPILATION).entrySet()  %>
                  <tr>
                      <td><% time.getKey() %></td>
                      <td><% time.getValue() %></td>
@@ -281,8 +281,8 @@ org.apache.hive.service.cli.operation.SQLOperationDisplay;
              <th>Time (ms)</th>
           </tr>
 
-          <%if sod.getQueryDisplay() != null && sod.getQueryDisplay().getPerfLogTimes(QueryDisplay.Phase.EXECUTION) != null %>
-             <%for Map.Entry<String, Long> time : sod.getQueryDisplay().getPerfLogTimes(QueryDisplay.Phase.EXECUTION).entrySet()  %>
+          <%if queryInfo.getQueryDisplay() != null && queryInfo.getQueryDisplay().getPerfLogTimes(QueryDisplay.Phase.EXECUTION) != null %>
+             <%for Map.Entry<String, Long> time : queryInfo.getQueryDisplay().getPerfLogTimes(QueryDisplay.Phase.EXECUTION).entrySet()  %>
                  <tr>
                      <td><% time.getKey() %></td>
                      <td><% time.getValue() %></td>

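The template guards every cell with a null check because a QueryInfo may not yet carry a runtime or a QueryDisplay while the query is still executing. A standalone Java equivalent of the runtime cell above (the helper name is illustrative, not part of the patch):

// Hypothetical helper mirroring the template's null guard for the runtime column.
final class RuntimeFormat {
  private RuntimeFormat() {}

  // Renders a runtime in milliseconds as whole seconds, or "Not finished" while unset.
  static String render(Long runtimeMillis) {
    return runtimeMillis == null ? "Not finished" : String.valueOf(runtimeMillis / 1000);
  }
}
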
http://git-wip-us.apache.org/repos/asf/hive/blob/1fecb81f/service/src/java/org/apache/hive/service/cli/operation/OperationManager.java
----------------------------------------------------------------------
diff --git a/service/src/java/org/apache/hive/service/cli/operation/OperationManager.java b/service/src/java/org/apache/hive/service/cli/operation/OperationManager.java
index ac64ab2..46f524d 100644
--- a/service/src/java/org/apache/hive/service/cli/operation/OperationManager.java
+++ b/service/src/java/org/apache/hive/service/cli/operation/OperationManager.java
@@ -35,6 +35,7 @@ import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
 import org.apache.hadoop.hive.metastore.api.Schema;
+import org.apache.hadoop.hive.ql.QueryInfo;
 import org.apache.hadoop.hive.ql.log.LogDivertAppender;
 import org.apache.hadoop.hive.ql.log.LogDivertAppenderForTest;
 import org.apache.hadoop.hive.ql.session.OperationLog;
@@ -62,8 +63,8 @@ public class OperationManager extends AbstractService {
 
   //Following fields for displaying queries on WebUI
   private Object webuiLock = new Object();
-  private SQLOperationDisplayCache historicSqlOperations;
-  private Map<String, SQLOperationDisplay> liveSqlOperations = new LinkedHashMap<String, SQLOperationDisplay>();
+  private QueryInfoCache historicalQueryInfos;
+  private Map<String, QueryInfo> liveQueryInfos = new LinkedHashMap<>();
 
   public OperationManager() {
     super(OperationManager.class.getSimpleName());
@@ -75,7 +76,7 @@ public class OperationManager extends AbstractService {
     LogDivertAppenderForTest.registerRoutingAppenderIfInTest(hiveConf);
 
     if (hiveConf.isWebUiQueryInfoCacheEnabled()) {
-      historicSqlOperations = new SQLOperationDisplayCache(
+      historicalQueryInfos = new QueryInfoCache(
         hiveConf.getIntVar(ConfVars.HIVE_SERVER2_WEBUI_MAX_HISTORIC_QUERIES));
     }
     super.init(hiveConf);
@@ -187,8 +188,8 @@ public class OperationManager extends AbstractService {
     handleToOperation.put(operation.getHandle(), operation);
     if (operation instanceof SQLOperation) {
       synchronized (webuiLock) {
-        liveSqlOperations.put(operation.getHandle().getHandleIdentifier().toString(),
-          ((SQLOperation) operation).getSQLOperationDisplay());
+        liveQueryInfos.put(operation.getHandle().getHandleIdentifier().toString(),
+          ((SQLOperation) operation).getQueryInfo());
       }
     }
   }
@@ -196,7 +197,7 @@ public class OperationManager extends AbstractService {
   private Operation removeOperation(OperationHandle opHandle) {
     Operation operation = handleToOperation.remove(opHandle);
     if (operation instanceof SQLOperation) {
-      removeSaveSqlOperationDisplay(opHandle);
+      removeSafeQueryInfo(opHandle);
     }
     return operation;
   }
@@ -216,24 +217,24 @@ public class OperationManager extends AbstractService {
 
       handleToOperation.remove(operationHandle, operation);
       if (operation instanceof SQLOperation) {
-        removeSaveSqlOperationDisplay(operationHandle);
+        removeSafeQueryInfo(operationHandle);
       }
       return operation;
     }
     return null;
   }
 
-  private void removeSaveSqlOperationDisplay(OperationHandle operationHandle) {
+  private void removeSafeQueryInfo(OperationHandle operationHandle) {
     synchronized (webuiLock) {
       String opKey = operationHandle.getHandleIdentifier().toString();
       // remove from list of live operations
-      SQLOperationDisplay display = liveSqlOperations.remove(opKey);
+      QueryInfo display = liveQueryInfos.remove(opKey);
       if (display == null) {
         LOG.debug("Unexpected display object value of null for operation {}",
             opKey);
-      } else if (historicSqlOperations != null) {
+      } else if (historicalQueryInfos != null) {
         // add to list of saved historic operations
-        historicSqlOperations.put(opKey, display);
+        historicalQueryInfos.put(opKey, display);
       }
     }
   }
@@ -258,7 +259,7 @@ public class OperationManager extends AbstractService {
       LOG.debug(opHandle + ": Attempting to cancel from state - " + opState);
       operation.cancel(OperationState.CANCELED);
       if (operation instanceof SQLOperation) {
-        removeSaveSqlOperationDisplay(opHandle);
+        removeSafeQueryInfo(opHandle);
       }
     }
   }
@@ -360,11 +361,11 @@ public class OperationManager extends AbstractService {
    * @return displays representing a number of historical SQLOperations, at max number of
    * hive.server2.webui.max.historic.queries. Newest items will be first.
    */
-  public List<SQLOperationDisplay> getHistoricalSQLOperations() {
-    List<SQLOperationDisplay> result = new LinkedList<>();
+  public List<QueryInfo> getHistoricalQueryInfos() {
+    List<QueryInfo> result = new LinkedList<>();
     synchronized (webuiLock) {
-      if (historicSqlOperations != null) {
-        result.addAll(historicSqlOperations.values());
+      if (historicalQueryInfos != null) {
+        result.addAll(historicalQueryInfos.values());
         Collections.reverse(result);
       }
     }
@@ -374,10 +375,10 @@ public class OperationManager extends AbstractService {
   /**
    * @return displays representing live SQLOperations
    */
-  public List<SQLOperationDisplay> getLiveSqlOperations() {
-    List<SQLOperationDisplay> result = new LinkedList<>();
+  public List<QueryInfo> getLiveQueryInfos() {
+    List<QueryInfo> result = new LinkedList<>();
     synchronized (webuiLock) {
-      result.addAll(liveSqlOperations.values());
+      result.addAll(liveQueryInfos.values());
     }
     return result;
   }
@@ -386,17 +387,17 @@ public class OperationManager extends AbstractService {
    * @param handle handle of SQLOperation.
    * @return display representing a particular SQLOperation.
    */
-  public SQLOperationDisplay getSQLOperationDisplay(String handle) {
+  public QueryInfo getQueryInfo(String handle) {
     synchronized (webuiLock) {
-      if (historicSqlOperations == null) {
+      if (historicalQueryInfos == null) {
         return null;
       }
 
-      SQLOperationDisplay result = liveSqlOperations.get(handle);
+      QueryInfo result = liveQueryInfos.get(handle);
       if (result != null) {
         return result;
       }
-      return historicSqlOperations.get(handle);
+      return historicalQueryInfos.get(handle);
     }
   }
 }

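The getQueryInfo() lookup above resolves a handle against the live operations first and only then against the bounded historical cache, all under webuiLock. A self-contained sketch of that two-tier lookup (class and field names here are illustrative, not the actual Hive types):

import java.util.LinkedHashMap;
import java.util.Map;

class QueryInfoRegistry<V> {
  private final Object lock = new Object();
  private final Map<String, V> live = new LinkedHashMap<>();
  private final Map<String, V> historical;    // null when the WebUI query cache is disabled

  QueryInfoRegistry(Map<String, V> historical) {
    this.historical = historical;
  }

  V lookup(String handle) {
    synchronized (lock) {
      if (historical == null) {
        return null;                           // mirrors the early return when caching is off
      }
      V info = live.get(handle);
      return info != null ? info : historical.get(handle);
    }
  }
}
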
http://git-wip-us.apache.org/repos/asf/hive/blob/1fecb81f/service/src/java/org/apache/hive/service/cli/operation/QueryInfoCache.java
----------------------------------------------------------------------
diff --git a/service/src/java/org/apache/hive/service/cli/operation/QueryInfoCache.java b/service/src/java/org/apache/hive/service/cli/operation/QueryInfoCache.java
new file mode 100644
index 0000000..6c9443a
--- /dev/null
+++ b/service/src/java/org/apache/hive/service/cli/operation/QueryInfoCache.java
@@ -0,0 +1,41 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hive.service.cli.operation;
+
+import org.apache.hadoop.hive.ql.QueryInfo;
+
+import java.util.LinkedHashMap;
+import java.util.Map;
+
+/**
+ * Cache some SQLOperation information for WebUI
+ */
+public class QueryInfoCache extends LinkedHashMap<String, QueryInfo> {
+
+  private final int capacity;
+
+  public QueryInfoCache(int capacity) {
+      super(capacity + 1, 1.1f, false);
+      this.capacity = capacity;
+  }
+
+  @Override
+  protected boolean removeEldestEntry(Map.Entry eldest) {
+    return size() > capacity;
+  }
+}

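QueryInfoCache above is a plain LinkedHashMap bounded by overriding removeEldestEntry: once the map grows past its capacity, the oldest insertion is dropped. A minimal, self-contained demonstration of that idiom (illustrative only, not part of the patch):

import java.util.LinkedHashMap;
import java.util.Map;

public class BoundedCacheDemo {
  static class BoundedCache<K, V> extends LinkedHashMap<K, V> {
    private final int capacity;

    BoundedCache(int capacity) {
      super(capacity + 1, 1.1f, false);        // insertion order, same sizing as the patch
      this.capacity = capacity;
    }

    @Override
    protected boolean removeEldestEntry(Map.Entry<K, V> eldest) {
      return size() > capacity;                // evict once the bound is exceeded
    }
  }

  public static void main(String[] args) {
    BoundedCache<String, String> cache = new BoundedCache<>(2);
    cache.put("op1", "a");
    cache.put("op2", "b");
    cache.put("op3", "c");                     // "op1" is evicted here
    System.out.println(cache.keySet());        // prints [op2, op3]
  }
}
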
http://git-wip-us.apache.org/repos/asf/hive/blob/1fecb81f/service/src/java/org/apache/hive/service/cli/operation/SQLOperation.java
----------------------------------------------------------------------
diff --git a/service/src/java/org/apache/hive/service/cli/operation/SQLOperation.java b/service/src/java/org/apache/hive/service/cli/operation/SQLOperation.java
index d8718b3..0b51591 100644
--- a/service/src/java/org/apache/hive/service/cli/operation/SQLOperation.java
+++ b/service/src/java/org/apache/hive/service/cli/operation/SQLOperation.java
@@ -51,6 +51,7 @@ import org.apache.hadoop.hive.metastore.api.Schema;
 import org.apache.hadoop.hive.ql.CommandNeedRetryException;
 import org.apache.hadoop.hive.ql.Driver;
 import org.apache.hadoop.hive.ql.QueryDisplay;
+import org.apache.hadoop.hive.ql.QueryInfo;
 import org.apache.hadoop.hive.ql.QueryState;
 import org.apache.hadoop.hive.ql.exec.ExplainTask;
 import org.apache.hadoop.hive.ql.exec.FetchTask;
@@ -58,7 +59,6 @@ import org.apache.hadoop.hive.ql.exec.Task;
 import org.apache.hadoop.hive.ql.log.PerfLogger;
 import org.apache.hadoop.hive.ql.metadata.Hive;
 import org.apache.hadoop.hive.ql.processors.CommandProcessorResponse;
-import org.apache.hadoop.hive.ql.session.OperationLog;
 import org.apache.hadoop.hive.ql.session.SessionState;
 import org.apache.hadoop.hive.serde.serdeConstants;
 import org.apache.hadoop.hive.serde2.AbstractSerDe;
@@ -68,7 +68,6 @@ import org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe;
 import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
 import org.apache.hadoop.hive.serde2.objectinspector.StructField;
 import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector;
-import org.apache.hadoop.hive.serde2.thrift.ThriftJDBCBinarySerDe;
 import org.apache.hadoop.hive.shims.Utils;
 import org.apache.hadoop.io.BytesWritable;
 import org.apache.hadoop.security.UserGroupInformation;
@@ -97,8 +96,7 @@ public class SQLOperation extends ExecuteStatementOperation {
   private AbstractSerDe serde = null;
   private boolean fetchStarted = false;
   private volatile MetricsScope currentSQLStateScope;
-  // Display for WebUI.
-  private SQLOperationDisplay sqlOpDisplay;
+  private QueryInfo queryInfo;
   private long queryTimeout;
   private ScheduledExecutorService timeoutExecutor;
   private final boolean runAsync;
@@ -123,11 +121,9 @@ public class SQLOperation extends ExecuteStatementOperation {
     }
 
     setupSessionIO(parentSession.getSessionState());
-    try {
-      sqlOpDisplay = new SQLOperationDisplay(this);
-    } catch (HiveSQLException e) {
-      LOG.warn("Error calcluating SQL Operation Display for webui", e);
-    }
+
+    queryInfo = new QueryInfo(getState().toString(), getParentSession().getUserName(),
+            getExecutionEngine(), getHandle().getHandleIdentifier().toString());
 
     Metrics metrics = MetricsFactory.getInstance();
     if (metrics != null) {
@@ -157,13 +153,13 @@ public class SQLOperation extends ExecuteStatementOperation {
 
   /**
    * Compile the query and extract metadata
-   * @param sqlOperationConf
+   *
    * @throws HiveSQLException
    */
   public void prepare(QueryState queryState) throws HiveSQLException {
     setState(OperationState.RUNNING);
     try {
-      driver = new Driver(queryState, getParentSession().getUserName());
+      driver = new Driver(queryState, getParentSession().getUserName(), queryInfo);
 
       // Start the timer thread for cancelling the query when query timeout is reached
       // queryTimeout == 0 means no timeout
@@ -188,7 +184,7 @@ public class SQLOperation extends ExecuteStatementOperation {
         timeoutExecutor.schedule(timeoutTask, queryTimeout, TimeUnit.SECONDS);
       }
 
-      sqlOpDisplay.setQueryDisplay(driver.getQueryDisplay());
+      queryInfo.setQueryDisplay(driver.getQueryDisplay());
 
       // set the operation handle information in Driver, so that thrift API users
       // can use the operation handle they receive, to lookup query information in
@@ -379,8 +375,9 @@ public class SQLOperation extends ExecuteStatementOperation {
 
   /**
    * Returns the current UGI on the stack
-   * @param opConfig
+   *
    * @return UserGroupInformation
+   *
    * @throws HiveSQLException
    */
   private UserGroupInformation getCurrentUGI() throws HiveSQLException {
@@ -623,8 +620,8 @@ public class SQLOperation extends ExecuteStatementOperation {
   /**
    * Get summary information of this SQLOperation for display in WebUI.
    */
-  public SQLOperationDisplay getSQLOperationDisplay() {
-    return sqlOpDisplay;
+  public QueryInfo getQueryInfo() {
+    return queryInfo;
   }
 
   @Override
@@ -649,17 +646,17 @@ public class SQLOperation extends ExecuteStatementOperation {
 
     if (state == OperationState.FINISHED || state == OperationState.CANCELED || state == OperationState.ERROR) {
       //update runtime
-      sqlOpDisplay.setRuntime(getOperationComplete() - getOperationStart());
+      queryInfo.setRuntime(getOperationComplete() - getOperationStart());
       if (metrics != null && submittedQryScp != null) {
         metrics.endScope(submittedQryScp);
       }
     }
 
     if (state == OperationState.CLOSED) {
-      sqlOpDisplay.closed();
+      queryInfo.setEndTime();
     } else {
       //CLOSED state not interesting, state before (FINISHED, ERROR) is.
-      sqlOpDisplay.updateState(state);
+      queryInfo.updateState(state.toString());
     }
 
     if (state == OperationState.ERROR) {

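The last hunk moves the terminal-state bookkeeping from SQLOperationDisplay onto QueryInfo: runtime is recorded on FINISHED/CANCELED/ERROR, the end time on CLOSED, and any other state is kept as the latest state string. A self-contained sketch of that logic (the enum and interface stand in for Hive's OperationState and QueryInfo and are not the real classes):

public class StateBookkeepingSketch {
  enum OperationState { RUNNING, FINISHED, CANCELED, ERROR, CLOSED }

  interface QueryInfoLike {
    void setRuntime(long millis);
    void setEndTime();
    void updateState(String state);
  }

  static void onStateChanged(OperationState state, QueryInfoLike info,
                             long operationStart, long operationComplete) {
    if (state == OperationState.FINISHED || state == OperationState.CANCELED
        || state == OperationState.ERROR) {
      info.setRuntime(operationComplete - operationStart);   // running portion only
    }
    if (state == OperationState.CLOSED) {
      info.setEndTime();
    } else {
      info.updateState(state.name());                        // CLOSED itself is not recorded
    }
  }
}
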
http://git-wip-us.apache.org/repos/asf/hive/blob/1fecb81f/service/src/java/org/apache/hive/service/cli/operation/SQLOperationDisplay.java
----------------------------------------------------------------------
diff --git a/service/src/java/org/apache/hive/service/cli/operation/SQLOperationDisplay.java b/service/src/java/org/apache/hive/service/cli/operation/SQLOperationDisplay.java
deleted file mode 100644
index 3cfeccc..0000000
--- a/service/src/java/org/apache/hive/service/cli/operation/SQLOperationDisplay.java
+++ /dev/null
@@ -1,108 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hive.service.cli.operation;
-
-import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.ql.QueryDisplay;
-import org.apache.hive.service.cli.HiveSQLException;
-import org.apache.hive.service.cli.OperationState;
-
-/**
- * Used to display some info in the HS2 WebUI.
- *
- * The class is synchronized, as WebUI may access information about a running query.
- */
-public class SQLOperationDisplay {
-  public final String userName;
-  public final String executionEngine;
-  public final long beginTime;
-  public final String operationId;
-  public Long runtime;  //tracks only running portion of the query.
-
-  public Long endTime;
-  public OperationState state;
-  public QueryDisplay queryDisplay;
-
-  public SQLOperationDisplay(SQLOperation sqlOperation) throws HiveSQLException {
-    this.state = sqlOperation.getState();
-    this.userName = sqlOperation.getParentSession().getUserName();
-    this.executionEngine = sqlOperation.getExecutionEngine();
-    this.beginTime = System.currentTimeMillis();
-    this.operationId = sqlOperation.getHandle().getHandleIdentifier().toString();
-  }
-
-  public synchronized long getElapsedTime() {
-    if (isRunning()) {
-      return System.currentTimeMillis() - beginTime;
-    } else {
-      return endTime - beginTime;
-    }
-  }
-
-  public synchronized boolean isRunning() {
-    return endTime == null;
-  }
-
-  public synchronized QueryDisplay getQueryDisplay() {
-    return queryDisplay;
-  }
-
-  public synchronized void setQueryDisplay(QueryDisplay queryDisplay) {
-    this.queryDisplay = queryDisplay;
-  }
-
-  public String getUserName() {
-    return userName;
-  }
-
-  public String getExecutionEngine() {
-    return executionEngine;
-  }
-
-  public synchronized OperationState getState() {
-    return state;
-  }
-
-  public long getBeginTime() {
-    return beginTime;
-  }
-
-  public synchronized Long getEndTime() {
-    return endTime;
-  }
-
-  public synchronized void updateState(OperationState state) {
-    this.state = state;
-  }
-
-  public String getOperationId() {
-    return operationId;
-  }
-
-  public synchronized void closed() {
-    this.endTime = System.currentTimeMillis();
-  }
-
-  public synchronized void setRuntime(long runtime) {
-    this.runtime = runtime;
-  }
-
-  public synchronized Long getRuntime() {
-    return runtime;
-  }
-}

http://git-wip-us.apache.org/repos/asf/hive/blob/1fecb81f/service/src/java/org/apache/hive/service/cli/operation/SQLOperationDisplayCache.java
----------------------------------------------------------------------
diff --git a/service/src/java/org/apache/hive/service/cli/operation/SQLOperationDisplayCache.java b/service/src/java/org/apache/hive/service/cli/operation/SQLOperationDisplayCache.java
deleted file mode 100644
index 4a33d37..0000000
--- a/service/src/java/org/apache/hive/service/cli/operation/SQLOperationDisplayCache.java
+++ /dev/null
@@ -1,39 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hive.service.cli.operation;
-
-import java.util.LinkedHashMap;
-import java.util.Map;
-
-/**
- * Cache some SQLOperation information for WebUI
- */
-public class SQLOperationDisplayCache extends LinkedHashMap<String, SQLOperationDisplay> {
-
-  private final int capacity;
-
-  public SQLOperationDisplayCache(int capacity) {
-      super(capacity + 1, 1.1f, false);
-      this.capacity = capacity;
-  }
-
-  @Override
-  protected boolean removeEldestEntry(Map.Entry eldest) {
-    return size() > capacity;
-  }
-}

http://git-wip-us.apache.org/repos/asf/hive/blob/1fecb81f/service/src/java/org/apache/hive/service/servlet/QueryProfileServlet.java
----------------------------------------------------------------------
diff --git a/service/src/java/org/apache/hive/service/servlet/QueryProfileServlet.java b/service/src/java/org/apache/hive/service/servlet/QueryProfileServlet.java
index 8fa447a..27a3b14 100644
--- a/service/src/java/org/apache/hive/service/servlet/QueryProfileServlet.java
+++ b/service/src/java/org/apache/hive/service/servlet/QueryProfileServlet.java
@@ -17,8 +17,8 @@
  */
 package org.apache.hive.service.servlet;
 
+import org.apache.hadoop.hive.ql.QueryInfo;
 import org.apache.hive.service.cli.operation.OperationManager;
-import org.apache.hive.service.cli.operation.SQLOperationDisplay;
 import org.apache.hive.service.cli.session.SessionManager;
 import org.apache.hive.tmpl.QueryProfileTmpl;
 import org.slf4j.Logger;
@@ -46,12 +46,12 @@ public class QueryProfileServlet extends HttpServlet {
     SessionManager sessionManager =
       (SessionManager)ctx.getAttribute("hive.sm");
     OperationManager opManager = sessionManager.getOperationManager();
-    SQLOperationDisplay sod = opManager.getSQLOperationDisplay(opId);
-    if (sod == null) {
+    QueryInfo queryInfo = opManager.getQueryInfo(opId);
+    if (queryInfo == null) {
       LOG.debug("No display object found for operation {} ", opId);
       return;
     }
 
-    new QueryProfileTmpl().render(response.getWriter(), sod);
+    new QueryProfileTmpl().render(response.getWriter(), queryInfo);
   }
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/1fecb81f/service/src/resources/hive-webapps/hiveserver2/hiveserver2.jsp
----------------------------------------------------------------------
diff --git a/service/src/resources/hive-webapps/hiveserver2/hiveserver2.jsp b/service/src/resources/hive-webapps/hiveserver2/hiveserver2.jsp
index 0e0803b..c0ece6d 100644
--- a/service/src/resources/hive-webapps/hiveserver2/hiveserver2.jsp
+++ b/service/src/resources/hive-webapps/hiveserver2/hiveserver2.jsp
@@ -24,7 +24,7 @@
   import="org.apache.hive.common.util.HiveVersionInfo"
   import="org.apache.hive.service.cli.operation.Operation"
   import="org.apache.hive.service.cli.operation.SQLOperation"
-  import="org.apache.hive.service.cli.operation.SQLOperationDisplay"
+  import="org.apache.hadoop.hive.ql.QueryInfo"
   import="org.apache.hive.service.cli.session.SessionManager"
   import="org.apache.hive.service.cli.session.HiveSession"
   import="javax.servlet.ServletContext"
@@ -141,8 +141,8 @@ for (HiveSession hiveSession: hiveSessions) {
     </tr>
     <%
       int queries = 0;
-      Collection<SQLOperationDisplay> operations = sessionManager.getOperationManager().getLiveSqlOperations();
-      for (SQLOperationDisplay operation : operations) {
+      Collection<QueryInfo> operations = sessionManager.getOperationManager().getLiveQueryInfos();
+      for (QueryInfo operation : operations) {
           queries++;
     %>
     <tr>
@@ -182,8 +182,8 @@ for (HiveSession hiveSession: hiveSessions) {
     </tr>
     <%
       queries = 0;
-      operations = sessionManager.getOperationManager().getHistoricalSQLOperations();
-      for (SQLOperationDisplay operation : operations) {
+      operations = sessionManager.getOperationManager().getHistoricalQueryInfos();
+      for (QueryInfo operation : operations) {
           queries++;
     %>
     <tr>

http://git-wip-us.apache.org/repos/asf/hive/blob/1fecb81f/service/src/test/org/apache/hive/service/cli/operation/TestQueryLifeTimeHooksWithSQLOperation.java
----------------------------------------------------------------------
diff --git a/service/src/test/org/apache/hive/service/cli/operation/TestQueryLifeTimeHooksWithSQLOperation.java b/service/src/test/org/apache/hive/service/cli/operation/TestQueryLifeTimeHooksWithSQLOperation.java
new file mode 100644
index 0000000..c4f5451
--- /dev/null
+++ b/service/src/test/org/apache/hive/service/cli/operation/TestQueryLifeTimeHooksWithSQLOperation.java
@@ -0,0 +1,115 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hive.service.cli.operation;
+
+import com.google.common.collect.ImmutableMap;
+
+import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.ql.hooks.QueryLifeTimeHookContext;
+import org.apache.hadoop.hive.ql.hooks.QueryLifeTimeHookWithParseHooks;
+import org.apache.hadoop.hive.ql.hooks.TestQueryHooks;
+import org.apache.hadoop.hive.ql.session.SessionState;
+
+import org.apache.hive.service.cli.HiveSQLException;
+import org.apache.hive.service.cli.session.HiveSession;
+
+import org.junit.Test;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
+
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
+
+public class TestQueryLifeTimeHooksWithSQLOperation {
+
+  private static final String QUERY = "select 1";
+
+  @Test
+  public void testQueryInfoInHookContext() throws IllegalAccessException, ClassNotFoundException, InstantiationException, HiveSQLException {
+    HiveConf conf = new HiveConf(TestQueryHooks.class);
+    conf.setBoolVar(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY, false);
+    conf.setVar(HiveConf.ConfVars.HIVE_AUTHORIZATION_MANAGER,
+            "org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactory");
+    conf.set(HiveConf.ConfVars.HIVE_QUERY_LIFETIME_HOOKS.varname, QueryInfoVerificationHook.class.getName());
+
+    SessionState.start(conf);
+    HiveSession mockHiveSession = mock(HiveSession.class);
+    when(mockHiveSession.getHiveConf()).thenReturn(conf);
+    when(mockHiveSession.getSessionState()).thenReturn(SessionState.get());
+    SQLOperation sqlOperation = new SQLOperation(mockHiveSession, QUERY, ImmutableMap.of(), false, 0);
+    sqlOperation.run();
+  }
+
+  /**
+   * Has to be public so that it can be created by the Driver
+   */
+  public final static class QueryInfoVerificationHook implements QueryLifeTimeHookWithParseHooks {
+
+    @Override
+    public void beforeParse(QueryLifeTimeHookContext ctx) {
+      assertNotNull(ctx);
+      assertEquals(ctx.getCommand().trim(), QUERY);
+    }
+
+    @Override
+    public void afterParse(QueryLifeTimeHookContext ctx, boolean hasError) {
+      assertNotNull(ctx);
+      assertEquals(ctx.getCommand().trim(), QUERY);
+      assertFalse(hasError);
+    }
+
+    @Override
+    public void beforeCompile(QueryLifeTimeHookContext ctx) {
+      assertNotNull(ctx);
+      assertEquals(ctx.getCommand().trim(), QUERY);
+    }
+
+    @Override
+    public void afterCompile(QueryLifeTimeHookContext ctx, boolean hasError) {
+      assertNotNull(ctx);
+      assertEquals(ctx.getCommand().trim(), QUERY);
+      assertFalse(hasError);
+    }
+
+    @Override
+    public void beforeExecution(QueryLifeTimeHookContext ctx) {
+      assertNotNull(ctx);
+      assertEquals(ctx.getCommand().trim(), QUERY);
+      assertNotNull(ctx.getHookContext());
+      assertNotNull(ctx.getHookContext().getQueryInfo());
+      assertNotNull(ctx.getHookContext().getQueryInfo().getQueryDisplay());
+    }
+
+    @Override
+    public void afterExecution(QueryLifeTimeHookContext ctx, boolean hasError) {
+      assertNotNull(ctx);
+      assertEquals(ctx.getCommand().trim(), QUERY);
+      assertFalse(hasError);
+      assertNotNull(ctx.getHookContext());
+      assertNull(ctx.getHookContext().getErrorMessage());
+      assertNull(ctx.getHookContext().getException());
+      assertNotNull(ctx.getHookContext().getQueryInfo());
+      assertNotNull(ctx.getHookContext().getQueryInfo().getQueryDisplay());
+    }
+  }
+}


[35/50] [abbrv] hive git commit: HIVE-16588: Resource leak by druid http client (Slim Bouguerra, reviewed by Jesus Camacho Rodriguez)

Posted by we...@apache.org.
HIVE-16588: Resource leak by druid http client (Slim Bouguerra, reviewed by Jesus Camacho Rodriguez)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/57beac4e
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/57beac4e
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/57beac4e

Branch: refs/heads/hive-14535
Commit: 57beac4efe69796e4f7a5ea8e5ff67819f55a6a3
Parents: 301e7c5
Author: Slim Bouguerra <sl...@gmail.com>
Authored: Mon May 8 16:08:26 2017 +0100
Committer: Jesus Camacho Rodriguez <jc...@apache.org>
Committed: Mon May 8 16:08:53 2017 +0100

----------------------------------------------------------------------
 .../hadoop/hive/druid/DruidStorageHandler.java  | 43 ++++++++------
 .../druid/io/DruidQueryBasedInputFormat.java    | 60 ++------------------
 .../druid/serde/DruidQueryRecordReader.java     | 20 +------
 .../hadoop/hive/druid/serde/DruidSerDe.java     | 18 +-----
 .../hive/druid/TestDruidStorageHandler.java     | 19 ++-----
 5 files changed, 41 insertions(+), 119 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/57beac4e/druid-handler/src/java/org/apache/hadoop/hive/druid/DruidStorageHandler.java
----------------------------------------------------------------------
diff --git a/druid-handler/src/java/org/apache/hadoop/hive/druid/DruidStorageHandler.java b/druid-handler/src/java/org/apache/hadoop/hive/druid/DruidStorageHandler.java
index daee2fe..4510db3 100644
--- a/druid-handler/src/java/org/apache/hadoop/hive/druid/DruidStorageHandler.java
+++ b/druid-handler/src/java/org/apache/hadoop/hive/druid/DruidStorageHandler.java
@@ -65,6 +65,7 @@ import org.apache.hadoop.hive.serde2.AbstractSerDe;
 import org.apache.hadoop.mapred.InputFormat;
 import org.apache.hadoop.mapred.JobConf;
 import org.apache.hadoop.mapred.OutputFormat;
+import org.apache.hive.common.util.ShutdownHookManager;
 import org.joda.time.DateTime;
 import org.joda.time.DateTimeZone;
 import org.joda.time.Period;
@@ -91,13 +92,23 @@ public class DruidStorageHandler extends DefaultHiveMetaHook implements HiveStor
   protected static final SessionState.LogHelper console = new SessionState.LogHelper(LOG);
 
   public static final String SEGMENTS_DESCRIPTOR_DIR_NAME = "segmentsDescriptorDir";
+  private static final HttpClient HTTP_CLIENT;
+  static {
+    final Lifecycle lifecycle = new Lifecycle();
+    try {
+      lifecycle.start();
+    } catch (Exception e) {
+      LOG.error("Issues with lifecycle start", e);
+    }
+    HTTP_CLIENT = makeHttpClient(lifecycle);
+    ShutdownHookManager.addShutdownHook(()-> lifecycle.stop());
+  }
+
 
   private final SQLMetadataConnector connector;
 
   private final MetadataStorageTablesConfig druidMetadataStorageTablesConfig;
 
-  private HttpClient httpClient;
-
   private String uniqueId = null;
 
   private String rootWorkingDir = null;
@@ -151,12 +162,10 @@ public class DruidStorageHandler extends DefaultHiveMetaHook implements HiveStor
 
   @VisibleForTesting
   public DruidStorageHandler(SQLMetadataConnector connector,
-          MetadataStorageTablesConfig druidMetadataStorageTablesConfig,
-          HttpClient httpClient
+          MetadataStorageTablesConfig druidMetadataStorageTablesConfig
   ) {
     this.connector = connector;
     this.druidMetadataStorageTablesConfig = druidMetadataStorageTablesConfig;
-    this.httpClient = httpClient;
   }
 
   @Override
@@ -280,19 +289,12 @@ public class DruidStorageHandler extends DefaultHiveMetaHook implements HiveStor
       int maxTries = HiveConf.getIntVar(getConf(), HiveConf.ConfVars.HIVE_DRUID_MAX_TRIES);
       LOG.info(String.format("checking load status from coordinator [%s]", coordinatorAddress));
 
-      // check if the coordinator is up
-      httpClient = makeHttpClient(lifecycle);
-      try {
-        lifecycle.start();
-      } catch (Exception e) {
-        Throwables.propagate(e);
-      }
       String coordinatorResponse = null;
       try {
         coordinatorResponse = RetryUtils.retry(new Callable<String>() {
           @Override
           public String call() throws Exception {
-            return DruidStorageHandlerUtils.getURL(httpClient,
+            return DruidStorageHandlerUtils.getURL(getHttpClient(),
                     new URL(String.format("http://%s/status", coordinatorAddress))
             );
           }
@@ -347,7 +349,7 @@ public class DruidStorageHandler extends DefaultHiveMetaHook implements HiveStor
           @Override
           public boolean apply(URL input) {
             try {
-              String result = DruidStorageHandlerUtils.getURL(httpClient, input);
+              String result = DruidStorageHandlerUtils.getURL(getHttpClient(), input);
               LOG.debug(String.format("Checking segment [%s] response is [%s]", input, result));
               return Strings.isNullOrEmpty(result);
             } catch (IOException e) {
@@ -586,15 +588,18 @@ public class DruidStorageHandler extends DefaultHiveMetaHook implements HiveStor
     return rootWorkingDir;
   }
 
-  private HttpClient makeHttpClient(Lifecycle lifecycle) {
+  private static HttpClient makeHttpClient(Lifecycle lifecycle) {
     final int numConnection = HiveConf
-            .getIntVar(getConf(),
+            .getIntVar(SessionState.getSessionConf(),
                     HiveConf.ConfVars.HIVE_DRUID_NUM_HTTP_CONNECTION
             );
     final Period readTimeout = new Period(
-            HiveConf.getVar(getConf(),
+            HiveConf.getVar(SessionState.getSessionConf(),
                     HiveConf.ConfVars.HIVE_DRUID_HTTP_READ_TIMEOUT
             ));
+    LOG.info("Creating Druid HTTP client with {} max parallel connections and {}ms read timeout",
+            numConnection, readTimeout.toStandardDuration().getMillis()
+    );
 
     return HttpClientInit.createClient(
             HttpClientConfig.builder().withNumConnections(numConnection)
@@ -602,4 +607,8 @@ public class DruidStorageHandler extends DefaultHiveMetaHook implements HiveStor
             lifecycle
     );
   }
+
+  public static HttpClient getHttpClient() {
+    return HTTP_CLIENT;
+  }
 }

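The static initializer above builds one process-wide HttpClient and registers a shutdown hook that stops its Lifecycle, so the per-query client construction (and the leaked lifecycles that came with it) in the input format, record reader, and SerDe goes away. A generic sketch of the same pattern with a placeholder resource type, since the Druid/metamx APIs are not reproduced here:

public final class SharedClientHolder {
  private static final ExpensiveClient CLIENT;

  static {
    CLIENT = ExpensiveClient.create();                        // built exactly once per JVM
    Runtime.getRuntime().addShutdownHook(new Thread(CLIENT::close));
  }

  private SharedClientHolder() {}

  public static ExpensiveClient get() {
    return CLIENT;
  }

  // Placeholder resource so the sketch compiles on its own; not Druid's HttpClient.
  static final class ExpensiveClient implements AutoCloseable {
    static ExpensiveClient create() { return new ExpensiveClient(); }
    @Override public void close() { /* release sockets, threads, ... */ }
  }
}
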
http://git-wip-us.apache.org/repos/asf/hive/blob/57beac4e/druid-handler/src/java/org/apache/hadoop/hive/druid/io/DruidQueryBasedInputFormat.java
----------------------------------------------------------------------
diff --git a/druid-handler/src/java/org/apache/hadoop/hive/druid/io/DruidQueryBasedInputFormat.java b/druid-handler/src/java/org/apache/hadoop/hive/druid/io/DruidQueryBasedInputFormat.java
index 53624e1..2f53616 100644
--- a/druid-handler/src/java/org/apache/hadoop/hive/druid/io/DruidQueryBasedInputFormat.java
+++ b/druid-handler/src/java/org/apache/hadoop/hive/druid/io/DruidQueryBasedInputFormat.java
@@ -35,6 +35,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hive.conf.Constants;
 import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.druid.DruidStorageHandler;
 import org.apache.hadoop.hive.druid.DruidStorageHandlerUtils;
 import org.apache.hadoop.hive.druid.serde.DruidGroupByQueryRecordReader;
 import org.apache.hadoop.hive.druid.serde.DruidQueryRecordReader;
@@ -193,23 +194,6 @@ public class DruidQueryBasedInputFormat extends InputFormat<NullWritable, DruidW
               new String[]{address} ) };
     }
 
-    // Properties from configuration
-    final int numConnection = HiveConf.getIntVar(conf,
-            HiveConf.ConfVars.HIVE_DRUID_NUM_HTTP_CONNECTION);
-    final Period readTimeout = new Period(
-            HiveConf.getVar(conf, HiveConf.ConfVars.HIVE_DRUID_HTTP_READ_TIMEOUT));
-
-    // Create request to obtain nodes that are holding data for the given datasource and intervals
-    final Lifecycle lifecycle = new Lifecycle();
-    final HttpClient client = HttpClientInit.createClient(
-            HttpClientConfig.builder().withNumConnections(numConnection)
-                    .withReadTimeout(readTimeout.toStandardDuration()).build(), lifecycle);
-    try {
-      lifecycle.start();
-    } catch (Exception e) {
-      LOG.error("Lifecycle start issue");
-      throw new IOException(org.apache.hadoop.util.StringUtils.stringifyException(e));
-    }
     final String intervals =
             StringUtils.join(query.getIntervals(), ","); // Comma-separated intervals without brackets
     final String request = String.format(
@@ -217,9 +201,8 @@ public class DruidQueryBasedInputFormat extends InputFormat<NullWritable, DruidW
             address, query.getDataSource().getNames().get(0), URLEncoder.encode(intervals, "UTF-8"));
     final InputStream response;
     try {
-      response = DruidStorageHandlerUtils.submitRequest(client, new Request(HttpMethod.GET, new URL(request)));
+      response = DruidStorageHandlerUtils.submitRequest(DruidStorageHandler.getHttpClient(), new Request(HttpMethod.GET, new URL(request)));
     } catch (Exception e) {
-      lifecycle.stop();
       throw new IOException(org.apache.hadoop.util.StringUtils.stringifyException(e));
     }
 
@@ -231,8 +214,6 @@ public class DruidQueryBasedInputFormat extends InputFormat<NullWritable, DruidW
     } catch (Exception e) {
       response.close();
       throw new IOException(org.apache.hadoop.util.StringUtils.stringifyException(e));
-    } finally {
-      lifecycle.stop();
     }
 
     // Create one input split for each segment
@@ -260,12 +241,8 @@ public class DruidQueryBasedInputFormat extends InputFormat<NullWritable, DruidW
   private static HiveDruidSplit[] splitSelectQuery(Configuration conf, String address,
           SelectQuery query, Path dummyPath
   ) throws IOException {
-    final int selectThreshold = (int) HiveConf.getIntVar(
+    final int selectThreshold = HiveConf.getIntVar(
             conf, HiveConf.ConfVars.HIVE_DRUID_SELECT_THRESHOLD);
-    final int numConnection = HiveConf
-            .getIntVar(conf, HiveConf.ConfVars.HIVE_DRUID_NUM_HTTP_CONNECTION);
-    final Period readTimeout = new Period(
-            HiveConf.getVar(conf, HiveConf.ConfVars.HIVE_DRUID_HTTP_READ_TIMEOUT));
 
     final boolean isFetch = query.getContextBoolean(Constants.DRUID_QUERY_FETCH, false);
     if (isFetch) {
@@ -283,23 +260,12 @@ public class DruidQueryBasedInputFormat extends InputFormat<NullWritable, DruidW
     metadataBuilder.merge(true);
     metadataBuilder.analysisTypes();
     SegmentMetadataQuery metadataQuery = metadataBuilder.build();
-    Lifecycle lifecycle = new Lifecycle();
-    HttpClient client = HttpClientInit.createClient(
-            HttpClientConfig.builder().withNumConnections(numConnection)
-                    .withReadTimeout(readTimeout.toStandardDuration()).build(), lifecycle);
-    try {
-      lifecycle.start();
-    } catch (Exception e) {
-      LOG.error("Lifecycle start issue");
-      throw new IOException(org.apache.hadoop.util.StringUtils.stringifyException(e));
-    }
     InputStream response;
     try {
-      response = DruidStorageHandlerUtils.submitRequest(client,
+      response = DruidStorageHandlerUtils.submitRequest(DruidStorageHandler.getHttpClient(),
               DruidStorageHandlerUtils.createRequest(address, metadataQuery)
       );
     } catch (Exception e) {
-      lifecycle.stop();
       throw new IOException(org.apache.hadoop.util.StringUtils.stringifyException(e));
     }
 
@@ -313,8 +279,6 @@ public class DruidQueryBasedInputFormat extends InputFormat<NullWritable, DruidW
     } catch (Exception e) {
       response.close();
       throw new IOException(org.apache.hadoop.util.StringUtils.stringifyException(e));
-    } finally {
-      lifecycle.stop();
     }
     if (metadataList == null) {
       throw new IOException("Connected to Druid but could not retrieve datasource information");
@@ -350,23 +314,11 @@ public class DruidQueryBasedInputFormat extends InputFormat<NullWritable, DruidW
       TimeBoundaryQueryBuilder timeBuilder = new Druids.TimeBoundaryQueryBuilder();
       timeBuilder.dataSource(query.getDataSource());
       TimeBoundaryQuery timeQuery = timeBuilder.build();
-
-      lifecycle = new Lifecycle();
-      client = HttpClientInit.createClient(
-              HttpClientConfig.builder().withNumConnections(numConnection)
-                      .withReadTimeout(readTimeout.toStandardDuration()).build(), lifecycle);
-      try {
-        lifecycle.start();
-      } catch (Exception e) {
-        LOG.error("Lifecycle start issue");
-        throw new IOException(org.apache.hadoop.util.StringUtils.stringifyException(e));
-      }
       try {
-        response = DruidStorageHandlerUtils.submitRequest(client,
+        response = DruidStorageHandlerUtils.submitRequest(DruidStorageHandler.getHttpClient(),
                 DruidStorageHandlerUtils.createRequest(address, timeQuery)
         );
       } catch (Exception e) {
-        lifecycle.stop();
         throw new IOException(org.apache.hadoop.util.StringUtils.stringifyException(e));
       }
 
@@ -380,8 +332,6 @@ public class DruidQueryBasedInputFormat extends InputFormat<NullWritable, DruidW
       } catch (Exception e) {
         response.close();
         throw new IOException(org.apache.hadoop.util.StringUtils.stringifyException(e));
-      } finally {
-        lifecycle.stop();
       }
       if (timeList == null || timeList.isEmpty()) {
         throw new IOException(

http://git-wip-us.apache.org/repos/asf/hive/blob/57beac4e/druid-handler/src/java/org/apache/hadoop/hive/druid/serde/DruidQueryRecordReader.java
----------------------------------------------------------------------
diff --git a/druid-handler/src/java/org/apache/hadoop/hive/druid/serde/DruidQueryRecordReader.java b/druid-handler/src/java/org/apache/hadoop/hive/druid/serde/DruidQueryRecordReader.java
index 8d099c7..103591d 100644
--- a/druid-handler/src/java/org/apache/hadoop/hive/druid/serde/DruidQueryRecordReader.java
+++ b/druid-handler/src/java/org/apache/hadoop/hive/druid/serde/DruidQueryRecordReader.java
@@ -25,6 +25,7 @@ import com.metamx.http.client.HttpClientInit;
 import io.druid.query.BaseQuery;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.druid.DruidStorageHandler;
 import org.apache.hadoop.hive.druid.DruidStorageHandlerUtils;
 import org.apache.hadoop.hive.druid.io.HiveDruidSplit;
 import org.apache.hadoop.io.NullWritable;
@@ -81,26 +82,11 @@ public abstract class DruidQueryRecordReader<T extends BaseQuery<R>, R extends C
       LOG.info("Retrieving from druid using query:\n " + query);
     }
 
-    final Lifecycle lifecycle = new Lifecycle();
-    final int numConnection = HiveConf
-            .getIntVar(conf, HiveConf.ConfVars.HIVE_DRUID_NUM_HTTP_CONNECTION);
-    final Period readTimeout = new Period(
-            HiveConf.getVar(conf, HiveConf.ConfVars.HIVE_DRUID_HTTP_READ_TIMEOUT));
-
-    HttpClient client = HttpClientInit.createClient(
-            HttpClientConfig.builder().withReadTimeout(readTimeout.toStandardDuration())
-                    .withNumConnections(numConnection).build(), lifecycle);
-    try {
-      lifecycle.start();
-    } catch (Exception e) {
-      LOG.error("Issues with lifecycle start", e);
-    }
     InputStream response;
     try {
-      response = DruidStorageHandlerUtils.submitRequest(client,
+      response = DruidStorageHandlerUtils.submitRequest(DruidStorageHandler.getHttpClient(),
               DruidStorageHandlerUtils.createRequest(hiveDruidSplit.getLocations()[0], query));
     } catch (Exception e) {
-      lifecycle.stop();
       throw new IOException(org.apache.hadoop.util.StringUtils.stringifyException(e));
     }
 
@@ -111,8 +97,6 @@ public abstract class DruidQueryRecordReader<T extends BaseQuery<R>, R extends C
     } catch (IOException e) {
       response.close();
       throw e;
-    } finally {
-      lifecycle.stop();
     }
     if (resultsList == null || resultsList.isEmpty()) {
       return;

http://git-wip-us.apache.org/repos/asf/hive/blob/57beac4e/druid-handler/src/java/org/apache/hadoop/hive/druid/serde/DruidSerDe.java
----------------------------------------------------------------------
diff --git a/druid-handler/src/java/org/apache/hadoop/hive/druid/serde/DruidSerDe.java b/druid-handler/src/java/org/apache/hadoop/hive/druid/serde/DruidSerDe.java
index bbe29b6..656c0f1 100644
--- a/druid-handler/src/java/org/apache/hadoop/hive/druid/serde/DruidSerDe.java
+++ b/druid-handler/src/java/org/apache/hadoop/hive/druid/serde/DruidSerDe.java
@@ -32,6 +32,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hive.common.type.HiveDecimal;
 import org.apache.hadoop.hive.conf.Constants;
 import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.druid.DruidStorageHandler;
 import org.apache.hadoop.hive.druid.DruidStorageHandlerUtils;
 import org.apache.hadoop.hive.ql.exec.Utilities;
 import org.apache.hadoop.hive.serde.serdeConstants;
@@ -100,20 +101,12 @@ public class DruidSerDe extends AbstractSerDe {
 
   protected static final Logger LOG = LoggerFactory.getLogger(DruidSerDe.class);
 
-  private int numConnection;
-  private Period readTimeout;
-
   private String[] columns;
   private PrimitiveTypeInfo[] types;
   private ObjectInspector inspector;
 
   @Override
   public void initialize(Configuration configuration, Properties properties) throws SerDeException {
-    // Init connection properties
-    numConnection = HiveConf
-          .getIntVar(configuration, HiveConf.ConfVars.HIVE_DRUID_NUM_HTTP_CONNECTION);
-    readTimeout = new Period(
-          HiveConf.getVar(configuration, HiveConf.ConfVars.HIVE_DRUID_HTTP_READ_TIMEOUT));
 
     final List<String> columnNames = new ArrayList<>();
     final List<PrimitiveTypeInfo> columnTypes = new ArrayList<>();
@@ -256,20 +249,13 @@ public class DruidSerDe extends AbstractSerDe {
   /* Submits the request and returns */
   protected SegmentAnalysis submitMetadataRequest(String address, SegmentMetadataQuery query)
           throws SerDeException, IOException {
-    final Lifecycle lifecycle = new Lifecycle();
-    HttpClient client = HttpClientInit.createClient(
-            HttpClientConfig.builder().withNumConnections(numConnection)
-                    .withReadTimeout(readTimeout.toStandardDuration()).build(), lifecycle);
     InputStream response;
     try {
-      lifecycle.start();
-      response = DruidStorageHandlerUtils.submitRequest(client,
+      response = DruidStorageHandlerUtils.submitRequest(DruidStorageHandler.getHttpClient(),
               DruidStorageHandlerUtils.createRequest(address, query)
       );
     } catch (Exception e) {
       throw new SerDeException(StringUtils.stringifyException(e));
-    } finally {
-      lifecycle.stop();
     }
 
     // Retrieve results

http://git-wip-us.apache.org/repos/asf/hive/blob/57beac4e/druid-handler/src/test/org/apache/hadoop/hive/druid/TestDruidStorageHandler.java
----------------------------------------------------------------------
diff --git a/druid-handler/src/test/org/apache/hadoop/hive/druid/TestDruidStorageHandler.java b/druid-handler/src/test/org/apache/hadoop/hive/druid/TestDruidStorageHandler.java
index 05e3ec5..1fe155a 100644
--- a/druid-handler/src/test/org/apache/hadoop/hive/druid/TestDruidStorageHandler.java
+++ b/druid-handler/src/test/org/apache/hadoop/hive/druid/TestDruidStorageHandler.java
@@ -24,7 +24,6 @@ import com.google.common.collect.Lists;
 import io.druid.indexer.JobHelper;
 import io.druid.indexer.SQLMetadataStorageUpdaterJobHandler;
 import io.druid.metadata.MetadataStorageTablesConfig;
-import io.druid.metadata.SQLMetadataSegmentManager;
 import io.druid.segment.loading.SegmentLoadingException;
 import io.druid.timeline.DataSegment;
 import io.druid.timeline.partition.NoneShardSpec;
@@ -94,8 +93,7 @@ public class TestDruidStorageHandler {
   public void testPreCreateTableWillCreateSegmentsTable() throws MetaException {
     DruidStorageHandler druidStorageHandler = new DruidStorageHandler(
             derbyConnectorRule.getConnector(),
-            derbyConnectorRule.metadataTablesConfigSupplier().get(),
-            null
+            derbyConnectorRule.metadataTablesConfigSupplier().get()
     );
 
     try (Handle handle = derbyConnectorRule.getConnector().getDBI().open()) {
@@ -122,8 +120,7 @@ public class TestDruidStorageHandler {
     );
     DruidStorageHandler druidStorageHandler = new DruidStorageHandler(
             derbyConnectorRule.getConnector(),
-            derbyConnectorRule.metadataTablesConfigSupplier().get(),
-            null
+            derbyConnectorRule.metadataTablesConfigSupplier().get()
     );
     druidStorageHandler.preCreateTable(tableMock);
   }
@@ -133,8 +130,7 @@ public class TestDruidStorageHandler {
           throws MetaException, IOException {
     DruidStorageHandler druidStorageHandler = new DruidStorageHandler(
             derbyConnectorRule.getConnector(),
-            derbyConnectorRule.metadataTablesConfigSupplier().get(),
-            null
+            derbyConnectorRule.metadataTablesConfigSupplier().get()
     );
     druidStorageHandler.preCreateTable(tableMock);
     Configuration config = new Configuration();
@@ -164,8 +160,7 @@ public class TestDruidStorageHandler {
   public void testCommitInsertTable() throws MetaException, IOException {
     DruidStorageHandler druidStorageHandler = new DruidStorageHandler(
             derbyConnectorRule.getConnector(),
-            derbyConnectorRule.metadataTablesConfigSupplier().get(),
-            null
+            derbyConnectorRule.metadataTablesConfigSupplier().get()
     );
     druidStorageHandler.preCreateTable(tableMock);
     Configuration config = new Configuration();
@@ -189,8 +184,7 @@ public class TestDruidStorageHandler {
   public void testDeleteSegment() throws IOException, SegmentLoadingException {
     DruidStorageHandler druidStorageHandler = new DruidStorageHandler(
             derbyConnectorRule.getConnector(),
-            derbyConnectorRule.metadataTablesConfigSupplier().get(),
-            null
+            derbyConnectorRule.metadataTablesConfigSupplier().get()
     );
 
     String segmentRootPath = temporaryFolder.newFolder().getAbsolutePath();
@@ -234,8 +228,7 @@ public class TestDruidStorageHandler {
 
     DruidStorageHandler druidStorageHandler = new DruidStorageHandler(
         connector,
-        metadataStorageTablesConfig,
-        null
+        metadataStorageTablesConfig
     );
     druidStorageHandler.preCreateTable(tableMock);
     Configuration config = new Configuration();


[02/50] [abbrv] hive git commit: HIVE-16563: Alter table partition set location should use fully qualified path for non-default FS (Chao Sun, reviewed by Xuefu Zhang)

Posted by we...@apache.org.
HIVE-16563: Alter table partition set location should use fully qualified path for non-default FS (Chao Sun, reviewed by Xuefu Zhang)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/40b70eb1
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/40b70eb1
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/40b70eb1

Branch: refs/heads/hive-14535
Commit: 40b70eb18172b593d4184254190fe89990d8ffc5
Parents: 1af9802
Author: Chao Sun <su...@apache.org>
Authored: Mon May 1 13:16:11 2017 -0700
Committer: Chao Sun <su...@apache.org>
Committed: Tue May 2 15:55:37 2017 -0700

----------------------------------------------------------------------
 ...estDDLWithRemoteMetastoreSecondNamenode.java | 31 ++++++++++++++++++++
 .../apache/hadoop/hive/ql/metadata/Hive.java    | 10 +++++++
 2 files changed, 41 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/40b70eb1/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestDDLWithRemoteMetastoreSecondNamenode.java
----------------------------------------------------------------------
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestDDLWithRemoteMetastoreSecondNamenode.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestDDLWithRemoteMetastoreSecondNamenode.java
index bfb25aa..ce8fe60 100644
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestDDLWithRemoteMetastoreSecondNamenode.java
+++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestDDLWithRemoteMetastoreSecondNamenode.java
@@ -52,6 +52,7 @@ public class TestDDLWithRemoteMetastoreSecondNamenode extends TestCase {
   private static final String Table4Name = "table4_nondefault_nn";
   private static final String Table5Name = "table5_nondefault_nn";
   private static final String Table6Name = "table6_nondefault_nn";
+  private static final String Table7Name = "table7_nondefault_nn";
   private static final String Index1Name = "index1_table1_nondefault_nn";
   private static final String Index2Name = "index2_table1_nondefault_nn";
   private static final String tmpdir = System.getProperty("test.tmp.dir");
@@ -197,6 +198,27 @@ public class TestDDLWithRemoteMetastoreSecondNamenode extends TestCase {
     }
   }
 
+  private void alterPartitionAndCheck(Table table, String column,
+      String value, String location) throws CommandNeedRetryException, HiveException {
+    assertNotNull(location);
+    executeQuery("ALTER TABLE " + table.getTableName() +
+        " PARTITION (" + column + "='" + value + "')" +
+        " SET LOCATION '" + location + "'");
+    HashMap<String, String> partitions = new HashMap<String, String>();
+    partitions.put(column, value);
+    Partition partition = db.getPartition(table, partitions, false);
+    assertNotNull("Partition object is expected for " + table.getTableName() , partition);
+    String locationActual = partition.getLocation();
+    if (new Path(location).toUri().getScheme() != null) {
+      assertEquals("Partition should be located in the first filesystem",
+          fs.makeQualified(new Path(location)).toString(), locationActual);
+    }
+    else {
+      assertEquals("Partition should be located in the second filesystem",
+          fs2.makeQualified(new Path(location)).toString(), locationActual);
+    }
+  }
+
   private Table createTableAndCheck(String tableName, String tableLocation)
           throws CommandNeedRetryException, HiveException, URISyntaxException {
     return createTableAndCheck(null, tableName, tableLocation);
@@ -294,6 +316,15 @@ public class TestDDLWithRemoteMetastoreSecondNamenode extends TestCase {
     createTableAndCheck(table1, Table6Name, null);
   }
 
+  public void testAlterPartitionSetLocationNonDefaultNameNode() throws Exception {
+    assertTrue("Test suite should have been initialized", isInitialized);
+    String tableLocation = tmppathFs2 + "/" + "test_set_part_loc";
+    Table table = createTableAndCheck(Table7Name, tableLocation);
+
+    addPartitionAndCheck(table, "p", "p1", "/tmp/test/1");
+    alterPartitionAndCheck(table, "p", "p1", "/tmp/test/2");
+  }
+
   public void testCreateDatabaseWithTableNonDefaultNameNode() throws Exception {
     assertTrue("Test suite should be initialied", isInitialized );
     final String tableLocation = tmppathFs2 + "/" + Table3Name;

http://git-wip-us.apache.org/repos/asf/hive/blob/40b70eb1/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
index dec73a7..5b49dfd 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
@@ -690,6 +690,11 @@ public class Hive {
       throws InvalidOperationException, HiveException {
     try {
       validatePartition(newPart);
+      String location = newPart.getLocation();
+      if (location != null && !Utilities.isDefaultNameNode(conf)) {
+        location = Utilities.getQualifiedPath(conf, new Path(location));
+        newPart.setLocation(location);
+      }
       getMSC().alter_partition(dbName, tblName, newPart.getTPartition(), environmentContext);
 
     } catch (MetaException e) {
@@ -729,6 +734,11 @@ public class Hive {
         if (tmpPart.getParameters() != null) {
           tmpPart.getParameters().remove(hive_metastoreConstants.DDL_TIME);
         }
+        String location = tmpPart.getLocation();
+        if (location != null && !Utilities.isDefaultNameNode(conf)) {
+          location = Utilities.getQualifiedPath(conf, new Path(location));
+          tmpPart.setLocation(location);
+        }
         newTParts.add(tmpPart.getTPartition());
       }
       getMSC().alter_partitions(names[0], names[1], newTParts, environmentContext);
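
A minimal sketch, not part of the commit, of the behaviour the Hive.java change above
relies on: a scheme-less location such as "/tmp/test/1" is ambiguous once a second
namenode is in play, and makeQualified() is what pins it to a concrete filesystem before
the metastore stores it. The class name and setup below are illustrative assumptions.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    // Illustrative only; mirrors the qualification the patch performs via
    // Utilities.getQualifiedPath() before alter_partition reaches the metastore.
    public class QualifyLocationSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);          // whichever filesystem conf points at
        Path raw = new Path("/tmp/test/1");            // scheme-less, as in the new test
        System.out.println(raw.toUri().getScheme());   // null -> ambiguous between namenodes
        System.out.println(fs.makeQualified(raw));     // e.g. hdfs://<nn>:8020/tmp/test/1,
                                                       // or file:/tmp/test/1 with a bare conf
      }
    }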


[18/50] [abbrv] hive git commit: HIVE-16268 : enable incremental repl dump to handle functions metadata (Anishek Agarwal, reviewed by Sushanth Sowmyan)

Posted by we...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/9d4f13af/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/events/EventHandler.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/events/EventHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/events/EventHandler.java
deleted file mode 100644
index 29f3b42..0000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/events/EventHandler.java
+++ /dev/null
@@ -1,62 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hive.ql.parse.repl.events;
-
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.ql.metadata.Hive;
-import org.apache.hadoop.hive.ql.parse.ReplicationSpec;
-
-import org.apache.hadoop.hive.ql.parse.repl.load.DumpMetaData;
-import org.apache.hadoop.hive.ql.parse.repl.DumpType;
-
-public interface EventHandler {
-  void handle(Context withinContext) throws Exception;
-
-  long fromEventId();
-
-  long toEventId();
-
-  DumpType dumpType();
-
-  class Context {
-    final Path eventRoot, cmRoot;
-    final Hive db;
-    final HiveConf hiveConf;
-    final ReplicationSpec replicationSpec;
-
-    public Context(Path eventRoot, Path cmRoot, Hive db, HiveConf hiveConf,
-        ReplicationSpec replicationSpec) {
-      this.eventRoot = eventRoot;
-      this.cmRoot = cmRoot;
-      this.db = db;
-      this.hiveConf = hiveConf;
-      this.replicationSpec = replicationSpec;
-    }
-
-    DumpMetaData createDmd(EventHandler eventHandler) {
-      return new DumpMetaData(
-          eventRoot,
-          eventHandler.dumpType(),
-          eventHandler.fromEventId(),
-          eventHandler.toEventId(),
-          cmRoot, hiveConf
-      );
-    }
-  }
-}

http://git-wip-us.apache.org/repos/asf/hive/blob/9d4f13af/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/events/EventHandlerFactory.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/events/EventHandlerFactory.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/events/EventHandlerFactory.java
deleted file mode 100644
index 53adea8..0000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/events/EventHandlerFactory.java
+++ /dev/null
@@ -1,75 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hive.ql.parse.repl.events;
-
-import org.apache.hadoop.hive.metastore.api.NotificationEvent;
-import org.apache.hadoop.hive.metastore.messaging.MessageFactory;
-
-import java.lang.reflect.Constructor;
-import java.lang.reflect.InvocationTargetException;
-import java.lang.reflect.Modifier;
-import java.util.HashMap;
-import java.util.Map;
-
-public class EventHandlerFactory {
-  private EventHandlerFactory() {
-  }
-
-  private static Map<String, Class<? extends EventHandler>> registeredHandlers = new HashMap<>();
-
-  static {
-    register(MessageFactory.ADD_PARTITION_EVENT, AddPartitionHandler.class);
-    register(MessageFactory.ALTER_PARTITION_EVENT, AlterPartitionHandler.class);
-    register(MessageFactory.ALTER_TABLE_EVENT, AlterTableHandler.class);
-    register(MessageFactory.CREATE_TABLE_EVENT, CreateTableHandler.class);
-    register(MessageFactory.DROP_PARTITION_EVENT, DropPartitionHandler.class);
-    register(MessageFactory.DROP_TABLE_EVENT, DropTableHandler.class);
-    register(MessageFactory.INSERT_EVENT, InsertHandler.class);
-  }
-
-  static void register(String event, Class<? extends EventHandler> handlerClazz) {
-    try {
-      Constructor<? extends EventHandler> constructor =
-          handlerClazz.getDeclaredConstructor(NotificationEvent.class);
-      assert constructor != null;
-      assert !Modifier.isPrivate(constructor.getModifiers());
-      registeredHandlers.put(event, handlerClazz);
-    } catch (NoSuchMethodException e) {
-      throw new IllegalArgumentException("handler class: " + handlerClazz.getCanonicalName()
-          + " does not have the a constructor with only parameter of type:"
-          + NotificationEvent.class.getCanonicalName(), e);
-    }
-  }
-
-  public static EventHandler handlerFor(NotificationEvent event) {
-    if (registeredHandlers.containsKey(event.getEventType())) {
-      Class<? extends EventHandler> handlerClazz = registeredHandlers.get(event.getEventType());
-      try {
-        Constructor<? extends EventHandler> constructor =
-            handlerClazz.getDeclaredConstructor(NotificationEvent.class);
-        return constructor.newInstance(event);
-      } catch (NoSuchMethodException | IllegalAccessException | InstantiationException | InvocationTargetException e) {
-        // this should never happen. however we want to make sure we propagate the exception
-        throw new RuntimeException(
-            "failed when creating handler for " + event.getEventType()
-                + " with the responsible class being " + handlerClazz.getCanonicalName(), e);
-      }
-    }
-    return new DefaultHandler(event);
-  }
-}

http://git-wip-us.apache.org/repos/asf/hive/blob/9d4f13af/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/events/InsertHandler.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/events/InsertHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/events/InsertHandler.java
deleted file mode 100644
index 910b396..0000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/events/InsertHandler.java
+++ /dev/null
@@ -1,110 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hive.ql.parse.repl.events;
-
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hive.metastore.api.NotificationEvent;
-import org.apache.hadoop.hive.metastore.messaging.InsertMessage;
-import org.apache.hadoop.hive.ql.metadata.Partition;
-import org.apache.hadoop.hive.ql.parse.EximUtil;
-import org.apache.thrift.TException;
-
-import java.io.BufferedWriter;
-import java.io.IOException;
-import java.io.OutputStreamWriter;
-import java.util.Collections;
-import java.util.List;
-import java.util.Map;
-
-import org.apache.hadoop.hive.ql.parse.repl.DumpType;
-
-import org.apache.hadoop.hive.ql.parse.repl.load.DumpMetaData;
-
-public class InsertHandler extends AbstractHandler {
-
-  InsertHandler(NotificationEvent event) {
-    super(event);
-  }
-
-  @Override
-  public void handle(Context withinContext) throws Exception {
-    InsertMessage insertMsg = deserializer.getInsertMessage(event.getMessage());
-    org.apache.hadoop.hive.ql.metadata.Table qlMdTable = tableObject(withinContext, insertMsg);
-    Map<String, String> partSpec = insertMsg.getPartitionKeyValues();
-    List<Partition> qlPtns = null;
-    if (qlMdTable.isPartitioned() && !partSpec.isEmpty()) {
-      qlPtns = Collections.singletonList(withinContext.db.getPartition(qlMdTable, partSpec, false));
-    }
-    Path metaDataPath = new Path(withinContext.eventRoot, EximUtil.METADATA_NAME);
-
-    // Mark the replace type based on INSERT-INTO or INSERT_OVERWRITE operation
-    withinContext.replicationSpec.setIsReplace(insertMsg.isReplace());
-    EximUtil.createExportDump(metaDataPath.getFileSystem(withinContext.hiveConf), metaDataPath,
-        qlMdTable, qlPtns,
-        withinContext.replicationSpec);
-    Iterable<String> files = insertMsg.getFiles();
-
-    if (files != null) {
-      Path dataPath;
-      if ((null == qlPtns) || qlPtns.isEmpty()) {
-        dataPath = new Path(withinContext.eventRoot, EximUtil.DATA_PATH_NAME);
-      } else {
-        /*
-         * Insert into/overwrite operation shall operate on one or more partitions or even partitions from multiple
-         * tables. But, Insert event is generated for each partition to which the data is inserted. So, qlPtns list
-         * will have only one entry.
-         */
-        assert(1 == qlPtns.size());
-        dataPath = new Path(withinContext.eventRoot, qlPtns.get(0).getName());
-      }
-
-      // encoded filename/checksum of files, write into _files
-      try (BufferedWriter fileListWriter = writer(withinContext, dataPath)) {
-        for (String file : files) {
-          fileListWriter.write(file + "\n");
-        }
-      }
-    }
-
-    LOG.info("Processing#{} INSERT message : {}", fromEventId(), event.getMessage());
-    DumpMetaData dmd = withinContext.createDmd(this);
-    dmd.setPayload(event.getMessage());
-    dmd.write();
-  }
-
-  private org.apache.hadoop.hive.ql.metadata.Table tableObject(
-      Context withinContext, InsertMessage insertMsg) throws TException {
-    return new org.apache.hadoop.hive.ql.metadata.Table(
-        withinContext.db.getMSC().getTable(
-            insertMsg.getDB(), insertMsg.getTable()
-        )
-    );
-  }
-
-  private BufferedWriter writer(Context withinContext, Path dataPath) throws IOException {
-    Path filesPath = new Path(dataPath, EximUtil.FILES_NAME);
-    FileSystem fs = dataPath.getFileSystem(withinContext.hiveConf);
-    return new BufferedWriter(new OutputStreamWriter(fs.create(filesPath)));
-  }
-
-  @Override
-  public DumpType dumpType() {
-    return DumpType.EVENT_INSERT;
-  }
-}

http://git-wip-us.apache.org/repos/asf/hive/blob/9d4f13af/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/AbstractMessageHandler.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/AbstractMessageHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/AbstractMessageHandler.java
new file mode 100644
index 0000000..95e51e4
--- /dev/null
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/AbstractMessageHandler.java
@@ -0,0 +1,67 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.ql.parse.repl.load.message;
+
+import org.apache.hadoop.hive.metastore.messaging.MessageDeserializer;
+import org.apache.hadoop.hive.metastore.messaging.MessageFactory;
+import org.apache.hadoop.hive.ql.hooks.ReadEntity;
+import org.apache.hadoop.hive.ql.hooks.WriteEntity;
+import org.apache.hadoop.hive.ql.parse.ReplicationSpec;
+import org.apache.hadoop.hive.ql.parse.SemanticException;
+
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Set;
+
+abstract class AbstractMessageHandler implements MessageHandler {
+  final HashSet<ReadEntity> readEntitySet = new HashSet<>();
+  final HashSet<WriteEntity> writeEntitySet = new HashSet<>();
+  final Map<String, Long> tablesUpdated = new HashMap<>(),
+      databasesUpdated = new HashMap<>();
+  final MessageDeserializer deserializer = MessageFactory.getInstance().getDeserializer();
+
+  @Override
+  public Set<ReadEntity> readEntities() {
+    return readEntitySet;
+  }
+
+  @Override
+  public Set<WriteEntity> writeEntities() {
+    return writeEntitySet;
+  }
+
+  @Override
+  public Map<String, Long> tablesUpdated() {
+    return tablesUpdated;
+  }
+
+  @Override
+  public Map<String, Long> databasesUpdated() {
+    return databasesUpdated;
+  }
+
+  ReplicationSpec eventOnlyReplicationSpec(Context forContext) throws SemanticException {
+    String eventId = forContext.dmd.getEventTo().toString();
+    return replicationSpec(eventId, eventId);
+  }
+
+  private ReplicationSpec replicationSpec(String fromId, String toId) throws SemanticException {
+    return new ReplicationSpec(true, false, fromId, toId, false, true, false);
+  }
+}

http://git-wip-us.apache.org/repos/asf/hive/blob/9d4f13af/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/DefaultHandler.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/DefaultHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/DefaultHandler.java
new file mode 100644
index 0000000..6d346b6
--- /dev/null
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/DefaultHandler.java
@@ -0,0 +1,33 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.ql.parse.repl.load.message;
+
+import org.apache.hadoop.hive.ql.exec.Task;
+import org.apache.hadoop.hive.ql.parse.SemanticException;
+
+import java.io.Serializable;
+import java.util.ArrayList;
+import java.util.List;
+
+class DefaultHandler extends AbstractMessageHandler {
+  @Override
+  public List<Task<? extends Serializable>> handle(Context withinContext)
+      throws SemanticException {
+    return new ArrayList<>();
+  }
+}

http://git-wip-us.apache.org/repos/asf/hive/blob/9d4f13af/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/DropPartitionHandler.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/DropPartitionHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/DropPartitionHandler.java
new file mode 100644
index 0000000..73f2613
--- /dev/null
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/DropPartitionHandler.java
@@ -0,0 +1,108 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.ql.parse.repl.load.message;
+
+import org.apache.hadoop.hive.metastore.messaging.DropPartitionMessage;
+import org.apache.hadoop.hive.ql.exec.Task;
+import org.apache.hadoop.hive.ql.exec.TaskFactory;
+import org.apache.hadoop.hive.ql.metadata.Table;
+import org.apache.hadoop.hive.ql.parse.DDLSemanticAnalyzer;
+import org.apache.hadoop.hive.ql.parse.SemanticException;
+import org.apache.hadoop.hive.ql.plan.DDLWork;
+import org.apache.hadoop.hive.ql.plan.DropTableDesc;
+import org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc;
+import org.apache.hadoop.hive.ql.plan.ExprNodeConstantDesc;
+import org.apache.hadoop.hive.ql.plan.ExprNodeGenericFuncDesc;
+import org.apache.hadoop.hive.serde2.typeinfo.PrimitiveTypeInfo;
+import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;
+
+import java.io.Serializable;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+class DropPartitionHandler extends AbstractMessageHandler {
+  @Override
+  public List<Task<? extends Serializable>> handle(Context context)
+      throws SemanticException {
+    try {
+      DropPartitionMessage msg = deserializer.getDropPartitionMessage(context.dmd.getPayload());
+      String actualDbName = context.isDbNameEmpty() ? msg.getDB() : context.dbName;
+      String actualTblName = context.isTableNameEmpty() ? msg.getTable() : context.tableName;
+      Map<Integer, List<ExprNodeGenericFuncDesc>> partSpecs =
+          genPartSpecs(new Table(msg.getTableObj()),
+              msg.getPartitions());
+      if (partSpecs.size() > 0) {
+        DropTableDesc dropPtnDesc = new DropTableDesc(actualDbName + "." + actualTblName,
+            partSpecs, null, true, eventOnlyReplicationSpec(context));
+        Task<DDLWork> dropPtnTask = TaskFactory.get(
+            new DDLWork(readEntitySet, writeEntitySet, dropPtnDesc),
+            context.hiveConf
+        );
+        context.log.debug("Added drop ptn task : {}:{},{}", dropPtnTask.getId(),
+            dropPtnDesc.getTableName(), msg.getPartitions());
+        databasesUpdated.put(actualDbName, context.dmd.getEventTo());
+        tablesUpdated.put(actualDbName + "." + actualTblName, context.dmd.getEventTo());
+        return Collections.singletonList(dropPtnTask);
+      } else {
+        throw new SemanticException(
+            "DROP PARTITION EVENT does not return any part descs for event message :"
+                + context.dmd.getPayload());
+      }
+    } catch (Exception e) {
+      throw (e instanceof SemanticException)
+          ? (SemanticException) e
+          : new SemanticException("Error reading message members", e);
+    }
+  }
+
+  private Map<Integer, List<ExprNodeGenericFuncDesc>> genPartSpecs(Table table,
+      List<Map<String, String>> partitions) throws SemanticException {
+    Map<Integer, List<ExprNodeGenericFuncDesc>> partSpecs = new HashMap<>();
+    int partPrefixLength = 0;
+    if (partitions.size() > 0) {
+      partPrefixLength = partitions.get(0).size();
+      // pick the length of the first ptn, we expect all ptns listed to have the same number of
+      // key-vals.
+    }
+    List<ExprNodeGenericFuncDesc> partitionDesc = new ArrayList<>();
+    for (Map<String, String> ptn : partitions) {
+      // convert each key-value-map to appropriate expression.
+      ExprNodeGenericFuncDesc expr = null;
+      for (Map.Entry<String, String> kvp : ptn.entrySet()) {
+        String key = kvp.getKey();
+        Object val = kvp.getValue();
+        String type = table.getPartColByName(key).getType();
+        PrimitiveTypeInfo pti = TypeInfoFactory.getPrimitiveTypeInfo(type);
+        ExprNodeColumnDesc column = new ExprNodeColumnDesc(pti, key, null, true);
+        ExprNodeGenericFuncDesc op = DDLSemanticAnalyzer.makeBinaryPredicate(
+            "=", column, new ExprNodeConstantDesc(pti, val));
+        expr = (expr == null) ? op : DDLSemanticAnalyzer.makeBinaryPredicate("and", expr, op);
+      }
+      if (expr != null) {
+        partitionDesc.add(expr);
+      }
+    }
+    if (partitionDesc.size() > 0) {
+      partSpecs.put(partPrefixLength, partitionDesc);
+    }
+    return partSpecs;
+  }
+}
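
A small sketch, not part of the commit (class name hypothetical), of the predicate that
genPartSpecs() above assembles for a two-key partition spec {p=p1, q=q2}; the resulting
expression is what DropTableDesc uses to locate the partitions to drop on the replica.

    import org.apache.hadoop.hive.ql.parse.DDLSemanticAnalyzer;
    import org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc;
    import org.apache.hadoop.hive.ql.plan.ExprNodeConstantDesc;
    import org.apache.hadoop.hive.ql.plan.ExprNodeGenericFuncDesc;
    import org.apache.hadoop.hive.serde2.typeinfo.PrimitiveTypeInfo;
    import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;

    public class PartSpecPredicateSketch {
      public static void main(String[] args) throws Exception {
        PrimitiveTypeInfo stringType = TypeInfoFactory.getPrimitiveTypeInfo("string");
        ExprNodeGenericFuncDesc pEq = DDLSemanticAnalyzer.makeBinaryPredicate("=",
            new ExprNodeColumnDesc(stringType, "p", null, true),
            new ExprNodeConstantDesc(stringType, "p1"));
        ExprNodeGenericFuncDesc qEq = DDLSemanticAnalyzer.makeBinaryPredicate("=",
            new ExprNodeColumnDesc(stringType, "q", null, true),
            new ExprNodeConstantDesc(stringType, "q2"));
        // genPartSpecs chains the per-key equalities with "and", as in the handler above.
        ExprNodeGenericFuncDesc both = DDLSemanticAnalyzer.makeBinaryPredicate("and", pEq, qEq);
        System.out.println(both.getExprString());      // ((p = 'p1') and (q = 'q2'))
      }
    }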

http://git-wip-us.apache.org/repos/asf/hive/blob/9d4f13af/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/DropTableHandler.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/DropTableHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/DropTableHandler.java
new file mode 100644
index 0000000..b623f2f
--- /dev/null
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/DropTableHandler.java
@@ -0,0 +1,51 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.ql.parse.repl.load.message;
+
+import org.apache.hadoop.hive.metastore.messaging.DropTableMessage;
+import org.apache.hadoop.hive.ql.exec.Task;
+import org.apache.hadoop.hive.ql.exec.TaskFactory;
+import org.apache.hadoop.hive.ql.parse.SemanticException;
+import org.apache.hadoop.hive.ql.plan.DDLWork;
+import org.apache.hadoop.hive.ql.plan.DropTableDesc;
+
+import java.io.Serializable;
+import java.util.Collections;
+import java.util.List;
+
+class DropTableHandler extends AbstractMessageHandler {
+  @Override
+  public List<Task<? extends Serializable>> handle(Context context)
+      throws SemanticException {
+    DropTableMessage msg = deserializer.getDropTableMessage(context.dmd.getPayload());
+    String actualDbName = context.isDbNameEmpty() ? msg.getDB() : context.dbName;
+    String actualTblName = context.isTableNameEmpty() ? msg.getTable() : context.tableName;
+    DropTableDesc dropTableDesc = new DropTableDesc(
+        actualDbName + "." + actualTblName,
+        null, true, true,
+        eventOnlyReplicationSpec(context));
+    Task<DDLWork> dropTableTask = TaskFactory.get(
+        new DDLWork(readEntitySet, writeEntitySet, dropTableDesc),
+        context.hiveConf
+    );
+    context.log
+        .debug("Added drop tbl task : {}:{}", dropTableTask.getId(), dropTableDesc.getTableName());
+    databasesUpdated.put(actualDbName, context.dmd.getEventTo());
+    return Collections.singletonList(dropTableTask);
+  }
+}

http://git-wip-us.apache.org/repos/asf/hive/blob/9d4f13af/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/InsertHandler.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/InsertHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/InsertHandler.java
new file mode 100644
index 0000000..fa63169
--- /dev/null
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/InsertHandler.java
@@ -0,0 +1,47 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.ql.parse.repl.load.message;
+
+import org.apache.hadoop.hive.metastore.messaging.InsertMessage;
+import org.apache.hadoop.hive.ql.exec.Task;
+import org.apache.hadoop.hive.ql.parse.SemanticException;
+
+import java.io.Serializable;
+import java.util.List;
+
+class InsertHandler extends AbstractMessageHandler {
+  @Override
+  public List<Task<? extends Serializable>> handle(Context withinContext)
+      throws SemanticException {
+    InsertMessage insertMessage = deserializer.getInsertMessage(withinContext.dmd.getPayload());
+    String actualDbName =
+        withinContext.isDbNameEmpty() ? insertMessage.getDB() : withinContext.dbName;
+    String actualTblName =
+        withinContext.isTableNameEmpty() ? insertMessage.getTable() : withinContext.tableName;
+
+    Context currentContext = new Context(withinContext, actualDbName, actualTblName);
+    // Piggybacking in Import logic for now
+    TableHandler tableHandler = new TableHandler();
+    List<Task<? extends Serializable>> tasks = tableHandler.handle(currentContext);
+    readEntitySet.addAll(tableHandler.readEntities());
+    writeEntitySet.addAll(tableHandler.writeEntities());
+    databasesUpdated.putAll(tableHandler.databasesUpdated);
+    tablesUpdated.putAll(tableHandler.tablesUpdated);
+    return tasks;
+  }
+}

http://git-wip-us.apache.org/repos/asf/hive/blob/9d4f13af/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/MessageHandler.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/MessageHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/MessageHandler.java
new file mode 100644
index 0000000..840f95e
--- /dev/null
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/MessageHandler.java
@@ -0,0 +1,91 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.ql.parse.repl.load.message;
+
+import org.apache.commons.lang3.StringUtils;
+import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.ql.exec.Task;
+import org.apache.hadoop.hive.ql.hooks.ReadEntity;
+import org.apache.hadoop.hive.ql.hooks.WriteEntity;
+import org.apache.hadoop.hive.ql.metadata.Hive;
+import org.apache.hadoop.hive.ql.parse.SemanticException;
+import org.slf4j.Logger;
+
+import java.io.Serializable;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+import org.apache.hadoop.hive.ql.parse.repl.load.DumpMetaData;
+
+public interface MessageHandler {
+
+  List<Task<? extends Serializable>> handle(Context withinContext) throws SemanticException;
+
+  Set<ReadEntity> readEntities();
+
+  Set<WriteEntity> writeEntities();
+
+  Map<String, Long> tablesUpdated();
+
+  Map<String, Long> databasesUpdated();
+
+  class Context {
+    final String dbName, tableName, location;
+    final Task<? extends Serializable> precursor;
+    DumpMetaData dmd;
+    final HiveConf hiveConf;
+    final Hive db;
+    final org.apache.hadoop.hive.ql.Context nestedContext;
+    final Logger log;
+
+    public Context(String dbName, String tableName, String location,
+        Task<? extends Serializable> precursor, DumpMetaData dmd, HiveConf hiveConf,
+        Hive db, org.apache.hadoop.hive.ql.Context nestedContext, Logger log) {
+      this.dbName = dbName;
+      this.tableName = tableName;
+      this.location = location;
+      this.precursor = precursor;
+      this.dmd = dmd;
+      this.hiveConf = hiveConf;
+      this.db = db;
+      this.nestedContext = nestedContext;
+      this.log = log;
+    }
+
+    public Context(Context other, String dbName, String tableName) {
+      this.dbName = dbName;
+      this.tableName = tableName;
+      this.location = other.location;
+      this.precursor = other.precursor;
+      this.dmd = other.dmd;
+      this.hiveConf = other.hiveConf;
+      this.db = other.db;
+      this.nestedContext = other.nestedContext;
+      this.log = other.log;
+    }
+
+    boolean isTableNameEmpty() {
+      return StringUtils.isEmpty(tableName);
+    }
+
+    boolean isDbNameEmpty() {
+      return StringUtils.isEmpty(dbName);
+    }
+  }
+}

http://git-wip-us.apache.org/repos/asf/hive/blob/9d4f13af/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/MessageHandlerFactory.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/MessageHandlerFactory.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/MessageHandlerFactory.java
new file mode 100644
index 0000000..de6ff74
--- /dev/null
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/MessageHandlerFactory.java
@@ -0,0 +1,79 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.ql.parse.repl.load.message;
+
+import org.apache.hadoop.hive.metastore.api.NotificationEvent;
+
+import java.lang.reflect.Constructor;
+import java.lang.reflect.InvocationTargetException;
+import java.lang.reflect.Modifier;
+import java.util.HashMap;
+import java.util.Map;
+
+import org.apache.hadoop.hive.ql.parse.repl.DumpType;
+
+public class MessageHandlerFactory {
+  private static Map<DumpType, Class<? extends MessageHandler>> messageHandlers = new HashMap<>();
+
+  static {
+    register(DumpType.EVENT_DROP_PARTITION, DropPartitionHandler.class);
+    register(DumpType.EVENT_DROP_TABLE, DropTableHandler.class);
+    register(DumpType.EVENT_INSERT, InsertHandler.class);
+    register(DumpType.EVENT_RENAME_PARTITION, RenamePartitionHandler.class);
+    register(DumpType.EVENT_RENAME_TABLE, RenameTableHandler.class);
+
+    register(DumpType.EVENT_CREATE_TABLE, TableHandler.class);
+    register(DumpType.EVENT_ADD_PARTITION, TableHandler.class);
+    register(DumpType.EVENT_ALTER_TABLE, TableHandler.class);
+    register(DumpType.EVENT_ALTER_PARTITION, TableHandler.class);
+
+    register(DumpType.EVENT_TRUNCATE_PARTITION, TruncatePartitionHandler.class);
+    register(DumpType.EVENT_TRUNCATE_TABLE, TruncateTableHandler.class);
+  }
+
+  private static void register(DumpType eventType, Class<? extends MessageHandler> handlerClazz) {
+    try {
+      Constructor<? extends MessageHandler> constructor =
+          handlerClazz.getDeclaredConstructor();
+      assert constructor != null;
+      assert !Modifier.isPrivate(constructor.getModifiers());
+      messageHandlers.put(eventType, handlerClazz);
+    } catch (NoSuchMethodException e) {
+      throw new IllegalArgumentException("handler class: " + handlerClazz.getCanonicalName()
+          + " does not have the a constructor with only parameter of type:"
+          + NotificationEvent.class.getCanonicalName(), e);
+    }
+  }
+
+  public static MessageHandler handlerFor(DumpType eventType) {
+    if (messageHandlers.containsKey(eventType)) {
+      Class<? extends MessageHandler> handlerClazz = messageHandlers.get(eventType);
+      try {
+        Constructor<? extends MessageHandler> constructor =
+            handlerClazz.getDeclaredConstructor();
+        return constructor.newInstance();
+      } catch (NoSuchMethodException | IllegalAccessException | InstantiationException | InvocationTargetException e) {
+        // this should never happen. however we want to make sure we propagate the exception
+        throw new RuntimeException(
+            "failed when creating handler for " + eventType
+                + " with the responsible class being " + handlerClazz.getCanonicalName(), e);
+      }
+    }
+    return new DefaultHandler();
+  }
+}
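
A brief usage sketch, not part of the commit (class name hypothetical), of the load-side
dispatch: REPL LOAD maps the DumpType recorded in each event's dump metadata to a handler,
and any type that was never register()-ed falls back to DefaultHandler, whose handle()
returns an empty task list.

    import org.apache.hadoop.hive.ql.parse.repl.DumpType;
    import org.apache.hadoop.hive.ql.parse.repl.load.message.MessageHandler;
    import org.apache.hadoop.hive.ql.parse.repl.load.message.MessageHandlerFactory;

    public class HandlerDispatchSketch {
      public static void main(String[] args) {
        // Registered event types resolve to their dedicated handlers.
        MessageHandler dropTable = MessageHandlerFactory.handlerFor(DumpType.EVENT_DROP_TABLE);
        MessageHandler insert = MessageHandlerFactory.handlerFor(DumpType.EVENT_INSERT);
        System.out.println(dropTable.getClass().getSimpleName());  // DropTableHandler
        System.out.println(insert.getClass().getSimpleName());     // InsertHandler
        // Any other DumpType with no registration falls through to DefaultHandler.
      }
    }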

http://git-wip-us.apache.org/repos/asf/hive/blob/9d4f13af/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/RenamePartitionHandler.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/RenamePartitionHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/RenamePartitionHandler.java
new file mode 100644
index 0000000..658f2ba
--- /dev/null
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/RenamePartitionHandler.java
@@ -0,0 +1,74 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.ql.parse.repl.load.message;
+
+import org.apache.hadoop.hive.metastore.api.FieldSchema;
+import org.apache.hadoop.hive.metastore.api.Table;
+import org.apache.hadoop.hive.metastore.messaging.AlterPartitionMessage;
+import org.apache.hadoop.hive.ql.exec.Task;
+import org.apache.hadoop.hive.ql.exec.TaskFactory;
+import org.apache.hadoop.hive.ql.parse.SemanticException;
+import org.apache.hadoop.hive.ql.plan.DDLWork;
+import org.apache.hadoop.hive.ql.plan.RenamePartitionDesc;
+
+import java.io.Serializable;
+import java.util.Collections;
+import java.util.Iterator;
+import java.util.LinkedHashMap;
+import java.util.List;
+import java.util.Map;
+
+class RenamePartitionHandler extends AbstractMessageHandler {
+  @Override
+  public List<Task<? extends Serializable>> handle(Context context)
+      throws SemanticException {
+
+    AlterPartitionMessage msg = deserializer.getAlterPartitionMessage(context.dmd.getPayload());
+    String actualDbName = context.isDbNameEmpty() ? msg.getDB() : context.dbName;
+    String actualTblName = context.isTableNameEmpty() ? msg.getTable() : context.tableName;
+
+    Map<String, String> newPartSpec = new LinkedHashMap<>();
+    Map<String, String> oldPartSpec = new LinkedHashMap<>();
+    String tableName = actualDbName + "." + actualTblName;
+    try {
+      Table tblObj = msg.getTableObj();
+      Iterator<String> beforeIterator = msg.getPtnObjBefore().getValuesIterator();
+      Iterator<String> afterIterator = msg.getPtnObjAfter().getValuesIterator();
+      for (FieldSchema fs : tblObj.getPartitionKeys()) {
+        oldPartSpec.put(fs.getName(), beforeIterator.next());
+        newPartSpec.put(fs.getName(), afterIterator.next());
+      }
+    } catch (Exception e) {
+      throw (e instanceof SemanticException)
+          ? (SemanticException) e
+          : new SemanticException("Error reading message members", e);
+    }
+
+    RenamePartitionDesc renamePtnDesc =
+        new RenamePartitionDesc(tableName, oldPartSpec, newPartSpec);
+    Task<DDLWork> renamePtnTask = TaskFactory.get(
+        new DDLWork(readEntitySet, writeEntitySet, renamePtnDesc), context.hiveConf
+    );
+    context.log
+        .debug("Added rename ptn task : {}:{}->{}", renamePtnTask.getId(), oldPartSpec,
+            newPartSpec);
+    databasesUpdated.put(actualDbName, context.dmd.getEventTo());
+    tablesUpdated.put(tableName, context.dmd.getEventTo());
+    return Collections.singletonList(renamePtnTask);
+  }
+}

http://git-wip-us.apache.org/repos/asf/hive/blob/9d4f13af/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/RenameTableHandler.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/RenameTableHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/RenameTableHandler.java
new file mode 100644
index 0000000..2c429c1
--- /dev/null
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/RenameTableHandler.java
@@ -0,0 +1,81 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.ql.parse.repl.load.message;
+
+import org.apache.hadoop.hive.metastore.messaging.AlterTableMessage;
+import org.apache.hadoop.hive.ql.exec.Task;
+import org.apache.hadoop.hive.ql.exec.TaskFactory;
+import org.apache.hadoop.hive.ql.parse.SemanticException;
+import org.apache.hadoop.hive.ql.plan.AlterTableDesc;
+import org.apache.hadoop.hive.ql.plan.DDLWork;
+
+import java.io.Serializable;
+import java.util.Collections;
+import java.util.List;
+
+class RenameTableHandler extends AbstractMessageHandler {
+  @Override
+  public List<Task<? extends Serializable>> handle(Context context)
+      throws SemanticException {
+
+    AlterTableMessage msg = deserializer.getAlterTableMessage(context.dmd.getPayload());
+    if (!context.isTableNameEmpty()) {
+      throw new SemanticException(
+          "RENAMES of tables are not supported for table-level replication");
+    }
+    try {
+      String oldDbName = msg.getTableObjBefore().getDbName();
+      String newDbName = msg.getTableObjAfter().getDbName();
+
+      if (!context.isDbNameEmpty()) {
+        // If we're loading into a db, instead of into the warehouse, then the oldDbName and
+        // newDbName must be the same
+        if (!oldDbName.equalsIgnoreCase(newDbName)) {
+          throw new SemanticException("Cannot replicate an event renaming a table across"
+              + " databases into a db level load " + oldDbName + "->" + newDbName);
+        } else {
+          // both were the same, and can be replaced by the new db we're loading into.
+          oldDbName = context.dbName;
+          newDbName = context.dbName;
+        }
+      }
+
+      String oldName = oldDbName + "." + msg.getTableObjBefore().getTableName();
+      String newName = newDbName + "." + msg.getTableObjAfter().getTableName();
+      AlterTableDesc renameTableDesc = new AlterTableDesc(oldName, newName, false);
+      Task<DDLWork> renameTableTask = TaskFactory.get(
+          new DDLWork(readEntitySet, writeEntitySet, renameTableDesc), context.hiveConf
+      );
+      context.log.debug(
+          "Added rename table task : {}:{}->{}", renameTableTask.getId(), oldName, newName
+      );
+      // oldDbName and newDbName *will* be the same if we're here
+      databasesUpdated.put(newDbName, context.dmd.getEventTo());
+      tablesUpdated.remove(oldName);
+      tablesUpdated.put(newName, context.dmd.getEventTo());
+      // Note : edge-case here in interaction with table-level REPL LOAD, where that nukes out tablesUpdated
+      // However, we explicitly don't support repl of that sort, and error out above if so. If that should
+      // ever change, this will need reworking.
+      return Collections.singletonList(renameTableTask);
+    } catch (Exception e) {
+      throw (e instanceof SemanticException)
+          ? (SemanticException) e
+          : new SemanticException("Error reading message members", e);
+    }
+  }
+}

http://git-wip-us.apache.org/repos/asf/hive/blob/9d4f13af/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/TableHandler.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/TableHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/TableHandler.java
new file mode 100644
index 0000000..2db8385
--- /dev/null
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/TableHandler.java
@@ -0,0 +1,68 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.ql.parse.repl.load.message;
+
+import org.apache.hadoop.hive.ql.exec.Task;
+import org.apache.hadoop.hive.ql.parse.EximUtil;
+import org.apache.hadoop.hive.ql.parse.ImportSemanticAnalyzer;
+import org.apache.hadoop.hive.ql.parse.SemanticException;
+
+import java.io.Serializable;
+import java.util.ArrayList;
+import java.util.LinkedHashMap;
+import java.util.List;
+
+class TableHandler extends AbstractMessageHandler {
+  @Override
+  public List<Task<? extends Serializable>> handle(Context context) throws SemanticException {
+    // Path being passed to us is a table dump location. We go ahead and load it in as needed.
+    // If tblName is null, then we default to the table name specified in _metadata, which is good.
+    // If tblName is specified, that is the name we are intended to create the new table as.
+    if (context.isDbNameEmpty()) {
+      throw new SemanticException("Database name cannot be null for a table load");
+    }
+    try {
+      // TODO: these could be static or inlined; nothing below appears to update them.
+
+      // no location set on repl loads
+      boolean isLocationSet = false;
+      // all repl imports are non-external
+      boolean isExternalSet = false;
+      // bootstrap loads are not partition level
+      boolean isPartSpecSet = false;
+      // repl loads are not partition level
+      LinkedHashMap<String, String> parsedPartSpec = null;
+      // no location for repl imports
+      String parsedLocation = null;
+      List<Task<? extends Serializable>> importTasks = new ArrayList<>();
+
+      EximUtil.SemanticAnalyzerWrapperContext x =
+          new EximUtil.SemanticAnalyzerWrapperContext(
+              context.hiveConf, context.db, readEntitySet, writeEntitySet, importTasks, context.log,
+              context.nestedContext);
+      ImportSemanticAnalyzer.prepareImport(isLocationSet, isExternalSet, isPartSpecSet,
+          (context.precursor != null), parsedLocation, context.tableName, context.dbName,
+          parsedPartSpec, context.location, x,
+          databasesUpdated, tablesUpdated);
+
+      return importTasks;
+    } catch (Exception e) {
+      throw new SemanticException(e);
+    }
+  }
+}

http://git-wip-us.apache.org/repos/asf/hive/blob/9d4f13af/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/TruncatePartitionHandler.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/TruncatePartitionHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/TruncatePartitionHandler.java
new file mode 100644
index 0000000..5436f0d
--- /dev/null
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/TruncatePartitionHandler.java
@@ -0,0 +1,69 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.ql.parse.repl.load.message;
+
+import org.apache.hadoop.hive.metastore.api.FieldSchema;
+import org.apache.hadoop.hive.metastore.messaging.AlterPartitionMessage;
+import org.apache.hadoop.hive.ql.exec.Task;
+import org.apache.hadoop.hive.ql.exec.TaskFactory;
+import org.apache.hadoop.hive.ql.parse.SemanticException;
+import org.apache.hadoop.hive.ql.plan.DDLWork;
+import org.apache.hadoop.hive.ql.plan.TruncateTableDesc;
+
+import java.io.Serializable;
+import java.util.Collections;
+import java.util.Iterator;
+import java.util.LinkedHashMap;
+import java.util.List;
+import java.util.Map;
+
+class TruncatePartitionHandler extends AbstractMessageHandler {
+  @Override
+  public List<Task<? extends Serializable>> handle(Context context) throws SemanticException {
+    AlterPartitionMessage msg = deserializer.getAlterPartitionMessage(context.dmd.getPayload());
+    String actualDbName = context.isDbNameEmpty() ? msg.getDB() : context.dbName;
+    String actualTblName = context.isTableNameEmpty() ? msg.getTable() : context.tableName;
+
+    Map<String, String> partSpec = new LinkedHashMap<>();
+    try {
+      org.apache.hadoop.hive.metastore.api.Table tblObj = msg.getTableObj();
+      Iterator<String> afterIterator = msg.getPtnObjAfter().getValuesIterator();
+      for (FieldSchema fs : tblObj.getPartitionKeys()) {
+        partSpec.put(fs.getName(), afterIterator.next());
+      }
+    } catch (Exception e) {
+      if (!(e instanceof SemanticException)) {
+        throw new SemanticException("Error reading message members", e);
+      } else {
+        throw (SemanticException) e;
+      }
+    }
+
+    TruncateTableDesc truncateTableDesc = new TruncateTableDesc(
+        actualDbName + "." + actualTblName, partSpec);
+    Task<DDLWork> truncatePtnTask =
+        TaskFactory.get(
+            new DDLWork(readEntitySet, writeEntitySet, truncateTableDesc),
+            context.hiveConf
+        );
+    context.log.debug("Added truncate ptn task : {}:{}", truncatePtnTask.getId(),
+        truncateTableDesc.getTableName());
+    databasesUpdated.put(actualDbName, context.dmd.getEventTo());
+    return Collections.singletonList(truncatePtnTask);
+  }
+}

http://git-wip-us.apache.org/repos/asf/hive/blob/9d4f13af/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/TruncateTableHandler.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/TruncateTableHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/TruncateTableHandler.java
new file mode 100644
index 0000000..731383c
--- /dev/null
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/TruncateTableHandler.java
@@ -0,0 +1,50 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.ql.parse.repl.load.message;
+
+import org.apache.hadoop.hive.metastore.messaging.AlterTableMessage;
+import org.apache.hadoop.hive.ql.exec.Task;
+import org.apache.hadoop.hive.ql.exec.TaskFactory;
+import org.apache.hadoop.hive.ql.parse.SemanticException;
+import org.apache.hadoop.hive.ql.plan.DDLWork;
+import org.apache.hadoop.hive.ql.plan.TruncateTableDesc;
+
+import java.io.Serializable;
+import java.util.Collections;
+import java.util.List;
+
+class TruncateTableHandler extends AbstractMessageHandler {
+  @Override
+  public List<Task<? extends Serializable>> handle(Context context) throws SemanticException {
+    AlterTableMessage msg = deserializer.getAlterTableMessage(context.dmd.getPayload());
+    String actualDbName = context.isDbNameEmpty() ? msg.getDB() : context.dbName;
+    String actualTblName = context.isTableNameEmpty() ? msg.getTable() : context.tableName;
+
+    TruncateTableDesc truncateTableDesc = new TruncateTableDesc(
+        actualDbName + "." + actualTblName, null);
+    Task<DDLWork> truncateTableTask = TaskFactory.get(
+        new DDLWork(readEntitySet, writeEntitySet, truncateTableDesc),
+        context.hiveConf
+    );
+
+    context.log.debug("Added truncate tbl task : {}:{}", truncateTableTask.getId(),
+        truncateTableDesc.getTableName());
+    databasesUpdated.put(actualDbName, context.dmd.getEventTo());
+    return Collections.singletonList(truncateTableTask);
+  }
+}

http://git-wip-us.apache.org/repos/asf/hive/blob/9d4f13af/ql/src/test/org/apache/hadoop/hive/ql/parse/repl/dump/events/TestEventHandlerFactory.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/parse/repl/dump/events/TestEventHandlerFactory.java b/ql/src/test/org/apache/hadoop/hive/ql/parse/repl/dump/events/TestEventHandlerFactory.java
new file mode 100644
index 0000000..c689e6f
--- /dev/null
+++ b/ql/src/test/org/apache/hadoop/hive/ql/parse/repl/dump/events/TestEventHandlerFactory.java
@@ -0,0 +1,62 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.ql.parse.repl.dump.events;
+
+import org.apache.hadoop.hive.metastore.api.NotificationEvent;
+import org.apache.hadoop.hive.ql.parse.repl.DumpType;
+import org.junit.Test;
+
+import static org.junit.Assert.assertTrue;
+
+public class TestEventHandlerFactory {
+  @Test(expected = IllegalArgumentException.class)
+  public void shouldNotAllowRegisteringEventsWhichCannotBeInstantiated() {
+    class NonCompatibleEventHandler implements EventHandler {
+      @Override
+      public void handle(Context withinContext) throws Exception {
+
+      }
+
+      @Override
+      public long fromEventId() {
+        return 0;
+      }
+
+      @Override
+      public long toEventId() {
+        return 0;
+      }
+
+      @Override
+      public DumpType dumpType() {
+        return null;
+      }
+    }
+    EventHandlerFactory.register("anyEvent", NonCompatibleEventHandler.class);
+  }
+
+  @Test
+  public void shouldProvideDefaultHandlerWhenNothingRegisteredForThatEvent() {
+    EventHandler eventHandler =
+        EventHandlerFactory.handlerFor(new NotificationEvent(Long.MAX_VALUE, Integer.MAX_VALUE,
+            "shouldGiveDefaultHandler", "s"));
+    assertTrue(eventHandler instanceof DefaultHandler);
+  }
+
+}
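
The two tests above pin down the factory contract: register() must reject handler classes it will not be able to instantiate later, and handlerFor() must fall back to a default handler for event types nobody registered. A minimal standalone sketch of that register/fallback pattern, using hypothetical names rather than Hive's EventHandlerFactory, might look like this:

    import java.lang.reflect.Constructor;
    import java.util.HashMap;
    import java.util.Map;

    public class HandlerFactorySketch {

      interface Handler { String describe(); }

      static final class DefaultHandler implements Handler {
        private final String event;
        DefaultHandler(String event) { this.event = event; }
        public String describe() { return "default handler for " + event; }
      }

      private static final Map<String, Class<? extends Handler>> REGISTRY = new HashMap<>();

      // Fail fast at registration time if the class has no (String) constructor to call later.
      static void register(String eventType, Class<? extends Handler> clazz) {
        try {
          clazz.getDeclaredConstructor(String.class);
        } catch (NoSuchMethodException e) {
          throw new IllegalArgumentException(clazz + " cannot be instantiated from an event", e);
        }
        REGISTRY.put(eventType, clazz);
      }

      // Unknown event types get the default handler instead of an error.
      static Handler handlerFor(String eventType) {
        Class<? extends Handler> clazz = REGISTRY.get(eventType);
        if (clazz == null) {
          return new DefaultHandler(eventType);
        }
        try {
          Constructor<? extends Handler> c = clazz.getDeclaredConstructor(String.class);
          return c.newInstance(eventType);
        } catch (ReflectiveOperationException e) {
          throw new IllegalStateException(e);
        }
      }

      public static void main(String[] args) {
        try {
          register("anyEvent", Handler.class);   // no usable constructor -> rejected
        } catch (IllegalArgumentException expected) {
          System.out.println("rejected: " + expected.getMessage());
        }
        System.out.println(handlerFor("UNKNOWN_EVENT").describe()); // default handler for UNKNOWN_EVENT
      }
    }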

http://git-wip-us.apache.org/repos/asf/hive/blob/9d4f13af/ql/src/test/org/apache/hadoop/hive/ql/parse/repl/events/TestEventHandlerFactory.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/parse/repl/events/TestEventHandlerFactory.java b/ql/src/test/org/apache/hadoop/hive/ql/parse/repl/events/TestEventHandlerFactory.java
deleted file mode 100644
index 4b802c4..0000000
--- a/ql/src/test/org/apache/hadoop/hive/ql/parse/repl/events/TestEventHandlerFactory.java
+++ /dev/null
@@ -1,62 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.ql.parse.repl.events;
-
-import org.apache.hadoop.hive.metastore.api.NotificationEvent;
-import org.apache.hadoop.hive.ql.parse.repl.DumpType;
-import org.junit.Test;
-
-import static org.junit.Assert.assertTrue;
-
-public class TestEventHandlerFactory {
-  @Test(expected = IllegalArgumentException.class)
-  public void shouldNotAllowRegisteringEventsWhichCannotBeInstantiated() {
-    class NonCompatibleEventHandler implements EventHandler {
-      @Override
-      public void handle(Context withinContext) throws Exception {
-
-      }
-
-      @Override
-      public long fromEventId() {
-        return 0;
-      }
-
-      @Override
-      public long toEventId() {
-        return 0;
-      }
-
-      @Override
-      public DumpType dumpType() {
-        return null;
-      }
-    }
-    EventHandlerFactory.register("anyEvent", NonCompatibleEventHandler.class);
-  }
-
-  @Test
-  public void shouldProvideDefaultHandlerWhenNothingRegisteredForThatEvent() {
-    EventHandler eventHandler =
-        EventHandlerFactory.handlerFor(new NotificationEvent(Long.MAX_VALUE, Integer.MAX_VALUE,
-            "shouldGiveDefaultHandler", "s"));
-    assertTrue(eventHandler instanceof DefaultHandler);
-  }
-
-}
\ No newline at end of file


[49/50] [abbrv] hive git commit: HIVE-14671 : merge master into hive-14535 (Wei Zheng)

Posted by we...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/1ceaf357/common/src/java/org/apache/hadoop/hive/common/FileUtils.java
----------------------------------------------------------------------
diff --cc common/src/java/org/apache/hadoop/hive/common/FileUtils.java
index c6bc9b9,985fd8c..58284dc
--- a/common/src/java/org/apache/hadoop/hive/common/FileUtils.java
+++ b/common/src/java/org/apache/hadoop/hive/common/FileUtils.java
@@@ -870,11 -865,11 +870,11 @@@ public final class FileUtils 
      }
      return false;
    }
--  
--  
++
++
    /**
     * Return whether all paths in the collection are schemaless
--   * 
++   *
     * @param paths
     * @return
     */
@@@ -889,16 -884,16 +889,16 @@@
  
    /**
     * Returns the deepest candidate path for the given path.
--   * 
++   *
     * prioritizes paths that include the scheme, then falls back to matches without the scheme
--   * 
++   *
     * @param path
     * @param candidates  the candidate paths
     * @return
     */
    public static Path getParentRegardlessOfScheme(Path path, Collection<Path> candidates) {
      Path schemalessPath = Path.getPathWithoutSchemeAndAuthority(path);
--    
++
      for(;path!=null && schemalessPath!=null; path=path.getParent(),schemalessPath=schemalessPath.getParent()){
        if(candidates.contains(path))
          return path;
@@@ -911,13 -906,13 +911,13 @@@
  
    /**
     * Checks whether path is inside the given subtree
--   * 
++   *
     * return true iff
     *  * path = subtree
     *  * subtreeContains(path,d) for any descendant of the subtree node
     * @param path    the path in question
     * @param subtree
--   * 
++   *
     * @return
     */
    public static boolean isPathWithinSubtree(Path path, Path subtree) {
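
The merge above only strips trailing whitespace, but the surrounding javadoc is worth a concrete illustration: getParentRegardlessOfScheme walks the given path and its scheme-stripped twin up the directory tree in lockstep and returns the first ancestor found among the candidates, preferring a scheme-qualified match. The sketch below approximates that behaviour under the stated assumptions that hadoop-common is on the classpath and that the scheme-stripped fallback happens in the same walk; it is an illustration of the idea, not the exact Hive implementation.

    import java.util.Arrays;
    import java.util.Collection;
    import org.apache.hadoop.fs.Path;

    public class ParentLookupSketch {

      static Path parentRegardlessOfScheme(Path path, Collection<Path> candidates) {
        Path schemaless = Path.getPathWithoutSchemeAndAuthority(path);
        for (; path != null && schemaless != null;
             path = path.getParent(), schemaless = schemaless.getParent()) {
          if (candidates.contains(path)) {
            return path;          // a match including scheme/authority wins first
          }
          if (candidates.contains(schemaless)) {
            return schemaless;    // otherwise fall back to the scheme-stripped form
          }
        }
        return null;              // no candidate is an ancestor of the path
      }

      public static void main(String[] args) {
        Collection<Path> candidates =
            Arrays.asList(new Path("/warehouse/db1"), new Path("hdfs://nn:8020/warehouse/db2"));
        Path p = new Path("hdfs://nn:8020/warehouse/db1/t1/part-00000");
        System.out.println(parentRegardlessOfScheme(p, candidates)); // /warehouse/db1
      }
    }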

http://git-wip-us.apache.org/repos/asf/hive/blob/1ceaf357/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/1ceaf357/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java
----------------------------------------------------------------------
diff --cc itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java
index 8468b84,88b9faf..93ff498
--- a/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java
+++ b/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java
@@@ -915,49 -914,8 +916,55 @@@ public class DummyRawStoreFailEvent imp
    }
  
    @Override
+   public Map<String, ColumnStatisticsObj> getAggrColStatsForTablePartitions(String dbName,
+       String tableName) throws MetaException, NoSuchObjectException {
+     return objectStore.getAggrColStatsForTablePartitions(dbName, tableName);
+   }
++
++  @Override
 +  @CanNotRetry
 +  public Boolean commitTransactionExpectDeadlock() {
 +    return null;
 +  }
 +
 +  @Override
 +  public void createTableWrite(Table arg0, long arg1, char arg2, long arg3) {
 +  }
 +
 +  @Override
 +  public void deleteTableWrites(String arg0, String arg1, long arg2, long arg3)
 +      throws MetaException {
 +  }
 +
 +  @Override
 +  public List<FullTableName> getAllMmTablesForCleanup() throws MetaException {
 +    return null;
 +  }
 +
 +  @Override
 +  public Collection<String> getAllPartitionLocations(String arg0, String arg1) {
 +    return null;
 +  }
 +
 +  @Override
 +  public MTableWrite getTableWrite(String arg0, String arg1, long arg2)
 +      throws MetaException {
 +    return null;
 +  }
 +
 +  @Override
 +  public List<Long> getTableWriteIds(String arg0, String arg1, long arg2,
 +      long arg3, char arg4) throws MetaException {
 +    return null;
 +  }
 +
 +  @Override
 +  public List<MTableWrite> getTableWrites(String arg0, String arg1, long arg2,
 +      long arg3) throws MetaException {
 +    return null;
 +  }
 +
 +  @Override
 +  public void updateTableWrite(MTableWrite arg0) {
 +  }
  }

http://git-wip-us.apache.org/repos/asf/hive/blob/1ceaf357/itests/src/test/resources/testconfiguration.properties
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/1ceaf357/metastore/if/hive_metastore.thrift
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/1ceaf357/metastore/scripts/upgrade/derby/hive-schema-2.2.0.derby.sql
----------------------------------------------------------------------
diff --cc metastore/scripts/upgrade/derby/hive-schema-2.2.0.derby.sql
index 7f1a64b,16e611c..a4977b6
--- a/metastore/scripts/upgrade/derby/hive-schema-2.2.0.derby.sql
+++ b/metastore/scripts/upgrade/derby/hive-schema-2.2.0.derby.sql
@@@ -60,7 -60,7 +60,7 @@@ CREATE TABLE "APP"."COLUMNS" ("SD_ID" B
  
  CREATE TABLE "APP"."ROLES" ("ROLE_ID" BIGINT NOT NULL, "CREATE_TIME" INTEGER NOT NULL, "OWNER_NAME" VARCHAR(128), "ROLE_NAME" VARCHAR(128));
  
- CREATE TABLE "APP"."TBLS" ("TBL_ID" BIGINT NOT NULL, "CREATE_TIME" INTEGER NOT NULL, "DB_ID" BIGINT, "LAST_ACCESS_TIME" INTEGER NOT NULL, "OWNER" VARCHAR(767), "RETENTION" INTEGER NOT NULL, "SD_ID" BIGINT, "TBL_NAME" VARCHAR(128), "TBL_TYPE" VARCHAR(128), "VIEW_EXPANDED_TEXT" LONG VARCHAR, "VIEW_ORIGINAL_TEXT" LONG VARCHAR, "IS_REWRITE_ENABLED" CHAR(1) NOT NULL, "MM_WATERMARK_WRITE_ID" BIGINT DEFAULT -1, "MM_NEXT_WRITE_ID" BIGINT DEFAULT 0);
 -CREATE TABLE "APP"."TBLS" ("TBL_ID" BIGINT NOT NULL, "CREATE_TIME" INTEGER NOT NULL, "DB_ID" BIGINT, "LAST_ACCESS_TIME" INTEGER NOT NULL, "OWNER" VARCHAR(767), "RETENTION" INTEGER NOT NULL, "SD_ID" BIGINT, "TBL_NAME" VARCHAR(256), "TBL_TYPE" VARCHAR(128), "VIEW_EXPANDED_TEXT" LONG VARCHAR, "VIEW_ORIGINAL_TEXT" LONG VARCHAR, "IS_REWRITE_ENABLED" CHAR(1) NOT NULL);
++CREATE TABLE "APP"."TBLS" ("TBL_ID" BIGINT NOT NULL, "CREATE_TIME" INTEGER NOT NULL, "DB_ID" BIGINT, "LAST_ACCESS_TIME" INTEGER NOT NULL, "OWNER" VARCHAR(767), "RETENTION" INTEGER NOT NULL, "SD_ID" BIGINT, "TBL_NAME" VARCHAR(256), "TBL_TYPE" VARCHAR(128), "VIEW_EXPANDED_TEXT" LONG VARCHAR, "VIEW_ORIGINAL_TEXT" LONG VARCHAR, "IS_REWRITE_ENABLED" CHAR(1) NOT NULL, "MM_WATERMARK_WRITE_ID" BIGINT DEFAULT -1, "MM_NEXT_WRITE_ID" BIGINT DEFAULT 0);
  
  CREATE TABLE "APP"."PARTITION_KEYS" ("TBL_ID" BIGINT NOT NULL, "PKEY_COMMENT" VARCHAR(4000), "PKEY_NAME" VARCHAR(128) NOT NULL, "PKEY_TYPE" VARCHAR(767) NOT NULL, "INTEGER_IDX" INTEGER NOT NULL);
  

http://git-wip-us.apache.org/repos/asf/hive/blob/1ceaf357/metastore/scripts/upgrade/derby/upgrade-2.1.0-to-2.2.0.derby.sql
----------------------------------------------------------------------
diff --cc metastore/scripts/upgrade/derby/upgrade-2.1.0-to-2.2.0.derby.sql
index cbf5be1,b05942f..3e87091
--- a/metastore/scripts/upgrade/derby/upgrade-2.1.0-to-2.2.0.derby.sql
+++ b/metastore/scripts/upgrade/derby/upgrade-2.1.0-to-2.2.0.derby.sql
@@@ -1,7 -1,6 +1,8 @@@
  -- Upgrade MetaStore schema from 2.1.0 to 2.2.0
  RUN '037-HIVE-14496.derby.sql';
  RUN '038-HIVE-10562.derby.sql';
+ RUN '039-HIVE-12274.derby.sql';
  
 +RUN '037-HIVE-14637.derby.sql';
 +
  UPDATE "APP".VERSION SET SCHEMA_VERSION='2.2.0', VERSION_COMMENT='Hive release version 2.2.0' where VER_ID=1;

http://git-wip-us.apache.org/repos/asf/hive/blob/1ceaf357/metastore/scripts/upgrade/mssql/hive-schema-2.2.0.mssql.sql
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/1ceaf357/metastore/scripts/upgrade/mssql/upgrade-2.1.0-to-2.2.0.mssql.sql
----------------------------------------------------------------------
diff --cc metastore/scripts/upgrade/mssql/upgrade-2.1.0-to-2.2.0.mssql.sql
index 99024c2,4995349..b786b16
--- a/metastore/scripts/upgrade/mssql/upgrade-2.1.0-to-2.2.0.mssql.sql
+++ b/metastore/scripts/upgrade/mssql/upgrade-2.1.0-to-2.2.0.mssql.sql
@@@ -1,8 -1,8 +1,9 @@@
  SELECT 'Upgrading MetaStore schema from 2.1.0 to 2.2.0' AS MESSAGE;
  
  :r 022-HIVE-14496.mssql.sql
 +:r 023-HIVE-14637.mssql.sql
  :r 023-HIVE-10562.mssql.sql
+ :r 024-HIVE-12274.mssql.sql
  
  UPDATE VERSION SET SCHEMA_VERSION='2.2.0', VERSION_COMMENT='Hive release version 2.2.0' where VER_ID=1;
  SELECT 'Finished upgrading MetaStore schema from 2.1.0 to 2.2.0' AS MESSAGE;

http://git-wip-us.apache.org/repos/asf/hive/blob/1ceaf357/metastore/scripts/upgrade/mysql/hive-schema-2.2.0.mysql.sql
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/1ceaf357/metastore/scripts/upgrade/mysql/upgrade-2.1.0-to-2.2.0.mysql.sql
----------------------------------------------------------------------
diff --cc metastore/scripts/upgrade/mysql/upgrade-2.1.0-to-2.2.0.mysql.sql
index 68300d3,e221439..f4c69a5
--- a/metastore/scripts/upgrade/mysql/upgrade-2.1.0-to-2.2.0.mysql.sql
+++ b/metastore/scripts/upgrade/mysql/upgrade-2.1.0-to-2.2.0.mysql.sql
@@@ -1,8 -1,8 +1,9 @@@
  SELECT 'Upgrading MetaStore schema from 2.1.0 to 2.2.0' AS ' ';
  
  SOURCE 037-HIVE-14496.mysql.sql;
 +SOURCE 038-HIVE-14637.mysql.sql;
  SOURCE 038-HIVE-10562.mysql.sql;
+ SOURCE 039-HIVE-12274.mysql.sql;
  
  UPDATE VERSION SET SCHEMA_VERSION='2.2.0', VERSION_COMMENT='Hive release version 2.2.0' where VER_ID=1;
  SELECT 'Finished upgrading MetaStore schema from 2.1.0 to 2.2.0' AS ' ';

http://git-wip-us.apache.org/repos/asf/hive/blob/1ceaf357/metastore/scripts/upgrade/oracle/hive-schema-2.2.0.oracle.sql
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/1ceaf357/metastore/scripts/upgrade/oracle/upgrade-2.1.0-to-2.2.0.oracle.sql
----------------------------------------------------------------------
diff --cc metastore/scripts/upgrade/oracle/upgrade-2.1.0-to-2.2.0.oracle.sql
index 058c0d5,53ec681..b2f35de
--- a/metastore/scripts/upgrade/oracle/upgrade-2.1.0-to-2.2.0.oracle.sql
+++ b/metastore/scripts/upgrade/oracle/upgrade-2.1.0-to-2.2.0.oracle.sql
@@@ -1,8 -1,8 +1,9 @@@
  SELECT 'Upgrading MetaStore schema from 2.1.0 to 2.2.0' AS Status from dual;
  
  @037-HIVE-14496.oracle.sql;
 +@038-HIVE-14637.oracle.sql;
  @038-HIVE-10562.oracle.sql;
+ @039-HIVE-12274.oracle.sql;
  
  UPDATE VERSION SET SCHEMA_VERSION='2.2.0', VERSION_COMMENT='Hive release version 2.2.0' where VER_ID=1;
  SELECT 'Finished upgrading MetaStore schema from 2.1.0 to 2.2.0' AS Status from dual;

http://git-wip-us.apache.org/repos/asf/hive/blob/1ceaf357/metastore/scripts/upgrade/postgres/hive-schema-2.2.0.postgres.sql
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/1ceaf357/metastore/scripts/upgrade/postgres/upgrade-2.1.0-to-2.2.0.postgres.sql
----------------------------------------------------------------------
diff --cc metastore/scripts/upgrade/postgres/upgrade-2.1.0-to-2.2.0.postgres.sql
index ae4adf7,732e184..e6daeca
--- a/metastore/scripts/upgrade/postgres/upgrade-2.1.0-to-2.2.0.postgres.sql
+++ b/metastore/scripts/upgrade/postgres/upgrade-2.1.0-to-2.2.0.postgres.sql
@@@ -1,8 -1,8 +1,9 @@@
  SELECT 'Upgrading MetaStore schema from 2.1.0 to 2.2.0';
  
  \i 036-HIVE-14496.postgres.sql;
 +\i 037-HIVE-14637.postgres.sql;
  \i 037-HIVE-10562.postgres.sql;
+ \i 038-HIVE-12274.postgres.sql;
  
  UPDATE "VERSION" SET "SCHEMA_VERSION"='2.2.0', "VERSION_COMMENT"='Hive release version 2.2.0' where "VER_ID"=1;
  SELECT 'Finished upgrading MetaStore schema from 2.1.0 to 2.2.0';


[04/50] [abbrv] hive git commit: HIVE-16550: Semijoin Hints should be able to skip the optimization if needed (Deepak Jaiswal, reviewed by Jason Dere)

Posted by we...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/5d459665/ql/src/test/results/clientpositive/llap/semijoin_hint.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/semijoin_hint.q.out b/ql/src/test/results/clientpositive/llap/semijoin_hint.q.out
index bc24893..388888e 100644
--- a/ql/src/test/results/clientpositive/llap/semijoin_hint.q.out
+++ b/ql/src/test/results/clientpositive/llap/semijoin_hint.q.out
@@ -160,9 +160,236 @@ POSTHOOK: Output: database:default
 POSTHOOK: Output: default@srccc
 POSTHOOK: Lineage: srccc.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
 POSTHOOK: Lineage: srccc.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: EXPLAIN select  /*+ semi(k, str, 5000)*/ count(*) from srcpart_date k join srcpart_small v on (k.str = v.key1) join alltypesorc_int i on (k.value = i.cstring)
+PREHOOK: query: explain select count(*) from srcpart_date k join srcpart_small v on (k.str = v.key1)
 PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN select  /*+ semi(k, str, 5000)*/ count(*) from srcpart_date k join srcpart_small v on (k.str = v.key1) join alltypesorc_int i on (k.value = i.cstring)
+POSTHOOK: query: explain select count(*) from srcpart_date k join srcpart_small v on (k.str = v.key1)
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Map 1 <- Reducer 5 (BROADCAST_EDGE)
+        Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 4 (SIMPLE_EDGE)
+        Reducer 3 <- Reducer 2 (CUSTOM_SIMPLE_EDGE)
+        Reducer 5 <- Map 4 (CUSTOM_SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: k
+                  filterExpr: (str is not null and (str BETWEEN DynamicValue(RS_7_v_str_min) AND DynamicValue(RS_7_v_str_max) and in_bloom_filter(str, DynamicValue(RS_7_v_str_bloom_filter)))) (type: boolean)
+                  Statistics: Num rows: 2000 Data size: 174000 Basic stats: COMPLETE Column stats: COMPLETE
+                  Filter Operator
+                    predicate: (str is not null and (str BETWEEN DynamicValue(RS_7_v_str_min) AND DynamicValue(RS_7_v_str_max) and in_bloom_filter(str, DynamicValue(RS_7_v_str_bloom_filter)))) (type: boolean)
+                    Statistics: Num rows: 2000 Data size: 174000 Basic stats: COMPLETE Column stats: COMPLETE
+                    Select Operator
+                      expressions: str (type: string)
+                      outputColumnNames: str
+                      Statistics: Num rows: 2000 Data size: 174000 Basic stats: COMPLETE Column stats: COMPLETE
+                      Reduce Output Operator
+                        key expressions: str (type: string)
+                        sort order: +
+                        Map-reduce partition columns: str (type: string)
+                        Statistics: Num rows: 2000 Data size: 174000 Basic stats: COMPLETE Column stats: COMPLETE
+            Execution mode: llap
+            LLAP IO: all inputs
+        Map 4 
+            Map Operator Tree:
+                TableScan
+                  alias: v
+                  filterExpr: key1 is not null (type: boolean)
+                  Statistics: Num rows: 1000 Data size: 87000 Basic stats: COMPLETE Column stats: PARTIAL
+                  Filter Operator
+                    predicate: key1 is not null (type: boolean)
+                    Statistics: Num rows: 1000 Data size: 87000 Basic stats: COMPLETE Column stats: PARTIAL
+                    Select Operator
+                      expressions: key1 (type: string)
+                      outputColumnNames: key1
+                      Statistics: Num rows: 1000 Data size: 87000 Basic stats: COMPLETE Column stats: PARTIAL
+                      Reduce Output Operator
+                        key expressions: key1 (type: string)
+                        sort order: +
+                        Map-reduce partition columns: key1 (type: string)
+                        Statistics: Num rows: 1000 Data size: 87000 Basic stats: COMPLETE Column stats: PARTIAL
+                      Select Operator
+                        expressions: key1 (type: string)
+                        outputColumnNames: _col0
+                        Statistics: Num rows: 1000 Data size: 87000 Basic stats: COMPLETE Column stats: PARTIAL
+                        Group By Operator
+                          aggregations: min(_col0), max(_col0), bloom_filter(_col0, expectedEntries=410)
+                          mode: hash
+                          outputColumnNames: _col0, _col1, _col2
+                          Statistics: Num rows: 1 Data size: 552 Basic stats: COMPLETE Column stats: PARTIAL
+                          Reduce Output Operator
+                            sort order: 
+                            Statistics: Num rows: 1 Data size: 552 Basic stats: COMPLETE Column stats: PARTIAL
+                            value expressions: _col0 (type: string), _col1 (type: string), _col2 (type: binary)
+            Execution mode: llap
+            LLAP IO: all inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Merge Join Operator
+                condition map:
+                     Inner Join 0 to 1
+                keys:
+                  0 str (type: string)
+                  1 key1 (type: string)
+                Statistics: Num rows: 9756 Data size: 78048 Basic stats: COMPLETE Column stats: PARTIAL
+                Select Operator
+                  Statistics: Num rows: 9756 Data size: 39024 Basic stats: COMPLETE Column stats: PARTIAL
+                  Group By Operator
+                    aggregations: count()
+                    mode: hash
+                    outputColumnNames: _col0
+                    Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: PARTIAL
+                    Reduce Output Operator
+                      sort order: 
+                      Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: PARTIAL
+                      value expressions: _col0 (type: bigint)
+        Reducer 3 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: count(VALUE._col0)
+                mode: mergepartial
+                outputColumnNames: $f0
+                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: PARTIAL
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: PARTIAL
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+        Reducer 5 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: min(VALUE._col0), max(VALUE._col1), bloom_filter(VALUE._col2, expectedEntries=410)
+                mode: final
+                outputColumnNames: _col0, _col1, _col2
+                Statistics: Num rows: 1 Data size: 552 Basic stats: COMPLETE Column stats: PARTIAL
+                Reduce Output Operator
+                  sort order: 
+                  Statistics: Num rows: 1 Data size: 552 Basic stats: COMPLETE Column stats: PARTIAL
+                  value expressions: _col0 (type: string), _col1 (type: string), _col2 (type: binary)
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: explain select /*+ semi(None)*/ count(*) from srcpart_date k join srcpart_small v on (k.str = v.key1)
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select /*+ semi(None)*/ count(*) from srcpart_date k join srcpart_small v on (k.str = v.key1)
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 4 (SIMPLE_EDGE)
+        Reducer 3 <- Reducer 2 (CUSTOM_SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: k
+                  filterExpr: str is not null (type: boolean)
+                  Statistics: Num rows: 2000 Data size: 174000 Basic stats: COMPLETE Column stats: COMPLETE
+                  Filter Operator
+                    predicate: str is not null (type: boolean)
+                    Statistics: Num rows: 2000 Data size: 174000 Basic stats: COMPLETE Column stats: COMPLETE
+                    Select Operator
+                      expressions: str (type: string)
+                      outputColumnNames: str
+                      Statistics: Num rows: 2000 Data size: 174000 Basic stats: COMPLETE Column stats: COMPLETE
+                      Reduce Output Operator
+                        key expressions: str (type: string)
+                        sort order: +
+                        Map-reduce partition columns: str (type: string)
+                        Statistics: Num rows: 2000 Data size: 174000 Basic stats: COMPLETE Column stats: COMPLETE
+            Execution mode: llap
+            LLAP IO: all inputs
+        Map 4 
+            Map Operator Tree:
+                TableScan
+                  alias: v
+                  filterExpr: key1 is not null (type: boolean)
+                  Statistics: Num rows: 1000 Data size: 87000 Basic stats: COMPLETE Column stats: PARTIAL
+                  Filter Operator
+                    predicate: key1 is not null (type: boolean)
+                    Statistics: Num rows: 1000 Data size: 87000 Basic stats: COMPLETE Column stats: PARTIAL
+                    Select Operator
+                      expressions: key1 (type: string)
+                      outputColumnNames: key1
+                      Statistics: Num rows: 1000 Data size: 87000 Basic stats: COMPLETE Column stats: PARTIAL
+                      Reduce Output Operator
+                        key expressions: key1 (type: string)
+                        sort order: +
+                        Map-reduce partition columns: key1 (type: string)
+                        Statistics: Num rows: 1000 Data size: 87000 Basic stats: COMPLETE Column stats: PARTIAL
+            Execution mode: llap
+            LLAP IO: all inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Merge Join Operator
+                condition map:
+                     Inner Join 0 to 1
+                keys:
+                  0 str (type: string)
+                  1 key1 (type: string)
+                Statistics: Num rows: 9756 Data size: 78048 Basic stats: COMPLETE Column stats: PARTIAL
+                Select Operator
+                  Statistics: Num rows: 9756 Data size: 39024 Basic stats: COMPLETE Column stats: PARTIAL
+                  Group By Operator
+                    aggregations: count()
+                    mode: hash
+                    outputColumnNames: _col0
+                    Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: PARTIAL
+                    Reduce Output Operator
+                      sort order: 
+                      Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: PARTIAL
+                      value expressions: _col0 (type: bigint)
+        Reducer 3 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: count(VALUE._col0)
+                mode: mergepartial
+                outputColumnNames: $f0
+                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: PARTIAL
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: PARTIAL
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: EXPLAIN select  /*+ semi(srcpart_date, str, 5000)*/ count(*) from srcpart_date join srcpart_small v on (srcpart_date.str = v.key1) join alltypesorc_int i on (srcpart_date.value = i.cstring)
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN select  /*+ semi(srcpart_date, str, 5000)*/ count(*) from srcpart_date join srcpart_small v on (srcpart_date.str = v.key1) join alltypesorc_int i on (srcpart_date.value = i.cstring)
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
@@ -186,10 +413,10 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: i
-                  filterExpr: (cstring is not null and (cstring BETWEEN DynamicValue(RS_7_k_cstring_min) AND DynamicValue(RS_7_k_cstring_max) and in_bloom_filter(cstring, DynamicValue(RS_7_k_cstring_bloom_filter)))) (type: boolean)
+                  filterExpr: (cstring is not null and (cstring BETWEEN DynamicValue(RS_7_srcpart_date_cstring_min) AND DynamicValue(RS_7_srcpart_date_cstring_max) and in_bloom_filter(cstring, DynamicValue(RS_7_srcpart_date_cstring_bloom_filter)))) (type: boolean)
                   Statistics: Num rows: 12288 Data size: 862450 Basic stats: COMPLETE Column stats: COMPLETE
                   Filter Operator
-                    predicate: (cstring is not null and (cstring BETWEEN DynamicValue(RS_7_k_cstring_min) AND DynamicValue(RS_7_k_cstring_max) and in_bloom_filter(cstring, DynamicValue(RS_7_k_cstring_bloom_filter)))) (type: boolean)
+                    predicate: (cstring is not null and (cstring BETWEEN DynamicValue(RS_7_srcpart_date_cstring_min) AND DynamicValue(RS_7_srcpart_date_cstring_max) and in_bloom_filter(cstring, DynamicValue(RS_7_srcpart_date_cstring_bloom_filter)))) (type: boolean)
                     Statistics: Num rows: 9174 Data size: 643900 Basic stats: COMPLETE Column stats: COMPLETE
                     Select Operator
                       expressions: cstring (type: string)
@@ -205,7 +432,7 @@ STAGE PLANS:
         Map 6 
             Map Operator Tree:
                 TableScan
-                  alias: k
+                  alias: srcpart_date
                   filterExpr: (str is not null and value is not null) (type: boolean)
                   Statistics: Num rows: 2000 Data size: 356000 Basic stats: COMPLETE Column stats: COMPLETE
                   Filter Operator
@@ -506,9 +733,9 @@ STAGE PLANS:
       Processor Tree:
         ListSink
 
-PREHOOK: query: explain select /*+ semi(k, str, 1000)*/ count(*) from srcpart_date k join srcpart_small v on (k.str = v.key1)
+PREHOOK: query: explain select /*+ semi(k, str, 5000)*/ count(*) from srcpart_date k join srcpart_small v on (k.str = v.key1)
 PREHOOK: type: QUERY
-POSTHOOK: query: explain select /*+ semi(k, str, 1000)*/ count(*) from srcpart_date k join srcpart_small v on (k.str = v.key1)
+POSTHOOK: query: explain select /*+ semi(k, str, 5000)*/ count(*) from srcpart_date k join srcpart_small v on (k.str = v.key1)
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
@@ -548,7 +775,7 @@ STAGE PLANS:
                         outputColumnNames: _col0
                         Statistics: Num rows: 2000 Data size: 174000 Basic stats: COMPLETE Column stats: COMPLETE
                         Group By Operator
-                          aggregations: min(_col0), max(_col0), bloom_filter(_col0, expectedEntries=1000)
+                          aggregations: min(_col0), max(_col0), bloom_filter(_col0, expectedEntries=5000)
                           mode: hash
                           outputColumnNames: _col0, _col1, _col2
                           Statistics: Num rows: 1 Data size: 552 Basic stats: COMPLETE Column stats: COMPLETE
@@ -618,7 +845,7 @@ STAGE PLANS:
             Execution mode: llap
             Reduce Operator Tree:
               Group By Operator
-                aggregations: min(VALUE._col0), max(VALUE._col1), bloom_filter(VALUE._col2, expectedEntries=1000)
+                aggregations: min(VALUE._col0), max(VALUE._col1), bloom_filter(VALUE._col2, expectedEntries=5000)
                 mode: final
                 outputColumnNames: _col0, _col1, _col2
                 Statistics: Num rows: 1 Data size: 552 Basic stats: COMPLETE Column stats: COMPLETE
@@ -633,9 +860,13 @@ STAGE PLANS:
       Processor Tree:
         ListSink
 
-PREHOOK: query: explain select /*+ semi(k, 1000)*/ count(*) from srcpart_date k join srcpart_small v on (k.str = v.key1)
+PREHOOK: query: explain select /*+ semi(k, str, 5000)*/ count(*) from srcpart_date k join srcpart_small s on (k.str = s.key1)
+        union all
+        select /*+ semi(v, 5000)*/ count(*) from srcpart_date d join srcpart_small v on (d.str = v.key1)
 PREHOOK: type: QUERY
-POSTHOOK: query: explain select /*+ semi(k, 1000)*/ count(*) from srcpart_date k join srcpart_small v on (k.str = v.key1)
+POSTHOOK: query: explain select /*+ semi(k, str, 5000)*/ count(*) from srcpart_date k join srcpart_small s on (k.str = s.key1)
+        union all
+        select /*+ semi(v, 5000)*/ count(*) from srcpart_date d join srcpart_small v on (d.str = v.key1)
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
@@ -646,10 +877,14 @@ STAGE PLANS:
     Tez
 #### A masked pattern was here ####
       Edges:
-        Map 5 <- Reducer 4 (BROADCAST_EDGE)
-        Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 5 (SIMPLE_EDGE)
-        Reducer 3 <- Reducer 2 (CUSTOM_SIMPLE_EDGE)
-        Reducer 4 <- Map 1 (CUSTOM_SIMPLE_EDGE)
+        Map 6 <- Reducer 5 (BROADCAST_EDGE)
+        Map 7 <- Reducer 11 (BROADCAST_EDGE)
+        Reducer 11 <- Map 10 (CUSTOM_SIMPLE_EDGE)
+        Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 6 (SIMPLE_EDGE)
+        Reducer 3 <- Reducer 2 (CUSTOM_SIMPLE_EDGE), Union 4 (CONTAINS)
+        Reducer 5 <- Map 1 (CUSTOM_SIMPLE_EDGE)
+        Reducer 8 <- Map 10 (SIMPLE_EDGE), Map 7 (SIMPLE_EDGE)
+        Reducer 9 <- Reducer 8 (CUSTOM_SIMPLE_EDGE), Union 4 (CONTAINS)
 #### A masked pattern was here ####
       Vertices:
         Map 1 
@@ -675,7 +910,7 @@ STAGE PLANS:
                         outputColumnNames: _col0
                         Statistics: Num rows: 2000 Data size: 174000 Basic stats: COMPLETE Column stats: COMPLETE
                         Group By Operator
-                          aggregations: min(_col0), max(_col0), bloom_filter(_col0, expectedEntries=1000)
+                          aggregations: min(_col0), max(_col0), bloom_filter(_col0, expectedEntries=5000)
                           mode: hash
                           outputColumnNames: _col0, _col1, _col2
                           Statistics: Num rows: 1 Data size: 552 Basic stats: COMPLETE Column stats: COMPLETE
@@ -685,10 +920,43 @@ STAGE PLANS:
                             value expressions: _col0 (type: string), _col1 (type: string), _col2 (type: binary)
             Execution mode: llap
             LLAP IO: all inputs
-        Map 5 
+        Map 10 
             Map Operator Tree:
                 TableScan
                   alias: v
+                  filterExpr: key1 is not null (type: boolean)
+                  Statistics: Num rows: 1000 Data size: 87000 Basic stats: COMPLETE Column stats: PARTIAL
+                  Filter Operator
+                    predicate: key1 is not null (type: boolean)
+                    Statistics: Num rows: 1000 Data size: 87000 Basic stats: COMPLETE Column stats: PARTIAL
+                    Select Operator
+                      expressions: key1 (type: string)
+                      outputColumnNames: _col0
+                      Statistics: Num rows: 1000 Data size: 87000 Basic stats: COMPLETE Column stats: PARTIAL
+                      Reduce Output Operator
+                        key expressions: _col0 (type: string)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: string)
+                        Statistics: Num rows: 1000 Data size: 87000 Basic stats: COMPLETE Column stats: PARTIAL
+                      Select Operator
+                        expressions: _col0 (type: string)
+                        outputColumnNames: _col0
+                        Statistics: Num rows: 1000 Data size: 87000 Basic stats: COMPLETE Column stats: PARTIAL
+                        Group By Operator
+                          aggregations: min(_col0), max(_col0), bloom_filter(_col0, expectedEntries=5000)
+                          mode: hash
+                          outputColumnNames: _col0, _col1, _col2
+                          Statistics: Num rows: 1 Data size: 552 Basic stats: COMPLETE Column stats: PARTIAL
+                          Reduce Output Operator
+                            sort order: 
+                            Statistics: Num rows: 1 Data size: 552 Basic stats: COMPLETE Column stats: PARTIAL
+                            value expressions: _col0 (type: string), _col1 (type: string), _col2 (type: binary)
+            Execution mode: llap
+            LLAP IO: all inputs
+        Map 6 
+            Map Operator Tree:
+                TableScan
+                  alias: s
                   filterExpr: (key1 is not null and (key1 BETWEEN DynamicValue(RS_6_k_key1_min) AND DynamicValue(RS_6_k_key1_max) and in_bloom_filter(key1, DynamicValue(RS_6_k_key1_bloom_filter)))) (type: boolean)
                   Statistics: Num rows: 1000 Data size: 87000 Basic stats: COMPLETE Column stats: PARTIAL
                   Filter Operator
@@ -705,19 +973,1516 @@ STAGE PLANS:
                         Statistics: Num rows: 1000 Data size: 87000 Basic stats: COMPLETE Column stats: PARTIAL
             Execution mode: llap
             LLAP IO: all inputs
-        Reducer 2 
-            Execution mode: llap
-            Reduce Operator Tree:
-              Merge Join Operator
-                condition map:
-                     Inner Join 0 to 1
-                keys:
-                  0 _col0 (type: string)
-                  1 _col0 (type: string)
-                Statistics: Num rows: 9756 Data size: 78048 Basic stats: COMPLETE Column stats: PARTIAL
-                Group By Operator
-                  aggregations: count()
-                  mode: hash
+        Map 7 
+            Map Operator Tree:
+                TableScan
+                  alias: d
+                  filterExpr: (str is not null and (str BETWEEN DynamicValue(RS_21_v_str_min) AND DynamicValue(RS_21_v_str_max) and in_bloom_filter(str, DynamicValue(RS_21_v_str_bloom_filter)))) (type: boolean)
+                  Statistics: Num rows: 2000 Data size: 174000 Basic stats: COMPLETE Column stats: COMPLETE
+                  Filter Operator
+                    predicate: (str is not null and (str BETWEEN DynamicValue(RS_21_v_str_min) AND DynamicValue(RS_21_v_str_max) and in_bloom_filter(str, DynamicValue(RS_21_v_str_bloom_filter)))) (type: boolean)
+                    Statistics: Num rows: 2000 Data size: 174000 Basic stats: COMPLETE Column stats: COMPLETE
+                    Select Operator
+                      expressions: str (type: string)
+                      outputColumnNames: _col0
+                      Statistics: Num rows: 2000 Data size: 174000 Basic stats: COMPLETE Column stats: COMPLETE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: string)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: string)
+                        Statistics: Num rows: 2000 Data size: 174000 Basic stats: COMPLETE Column stats: COMPLETE
+            Execution mode: llap
+            LLAP IO: all inputs
+        Reducer 11 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: min(VALUE._col0), max(VALUE._col1), bloom_filter(VALUE._col2, expectedEntries=5000)
+                mode: final
+                outputColumnNames: _col0, _col1, _col2
+                Statistics: Num rows: 1 Data size: 552 Basic stats: COMPLETE Column stats: PARTIAL
+                Reduce Output Operator
+                  sort order: 
+                  Statistics: Num rows: 1 Data size: 552 Basic stats: COMPLETE Column stats: PARTIAL
+                  value expressions: _col0 (type: string), _col1 (type: string), _col2 (type: binary)
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Merge Join Operator
+                condition map:
+                     Inner Join 0 to 1
+                keys:
+                  0 _col0 (type: string)
+                  1 _col0 (type: string)
+                Statistics: Num rows: 9756 Data size: 78048 Basic stats: COMPLETE Column stats: PARTIAL
+                Group By Operator
+                  aggregations: count()
+                  mode: hash
+                  outputColumnNames: _col0
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: PARTIAL
+                  Reduce Output Operator
+                    sort order: 
+                    Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: PARTIAL
+                    value expressions: _col0 (type: bigint)
+        Reducer 3 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: count(VALUE._col0)
+                mode: mergepartial
+                outputColumnNames: _col0
+                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: PARTIAL
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 2 Data size: 16 Basic stats: COMPLETE Column stats: PARTIAL
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+        Reducer 5 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: min(VALUE._col0), max(VALUE._col1), bloom_filter(VALUE._col2, expectedEntries=5000)
+                mode: final
+                outputColumnNames: _col0, _col1, _col2
+                Statistics: Num rows: 1 Data size: 552 Basic stats: COMPLETE Column stats: COMPLETE
+                Reduce Output Operator
+                  sort order: 
+                  Statistics: Num rows: 1 Data size: 552 Basic stats: COMPLETE Column stats: COMPLETE
+                  value expressions: _col0 (type: string), _col1 (type: string), _col2 (type: binary)
+        Reducer 8 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Merge Join Operator
+                condition map:
+                     Inner Join 0 to 1
+                keys:
+                  0 _col0 (type: string)
+                  1 _col0 (type: string)
+                Statistics: Num rows: 9756 Data size: 78048 Basic stats: COMPLETE Column stats: PARTIAL
+                Group By Operator
+                  aggregations: count()
+                  mode: hash
+                  outputColumnNames: _col0
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: PARTIAL
+                  Reduce Output Operator
+                    sort order: 
+                    Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: PARTIAL
+                    value expressions: _col0 (type: bigint)
+        Reducer 9 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: count(VALUE._col0)
+                mode: mergepartial
+                outputColumnNames: _col0
+                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: PARTIAL
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 2 Data size: 16 Basic stats: COMPLETE Column stats: PARTIAL
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+        Union 4 
+            Vertex: Union 4
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: explain select count(*) from srcpart_date k join srcpart_small v on (k.str = v.key1)
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select count(*) from srcpart_date k join srcpart_small v on (k.str = v.key1)
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Map 1 <- Reducer 5 (BROADCAST_EDGE)
+        Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 4 (SIMPLE_EDGE)
+        Reducer 3 <- Reducer 2 (CUSTOM_SIMPLE_EDGE)
+        Reducer 5 <- Map 4 (CUSTOM_SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: k
+                  filterExpr: (str is not null and (str BETWEEN DynamicValue(RS_7_v_str_min) AND DynamicValue(RS_7_v_str_max) and in_bloom_filter(str, DynamicValue(RS_7_v_str_bloom_filter)))) (type: boolean)
+                  Statistics: Num rows: 2000 Data size: 174000 Basic stats: COMPLETE Column stats: COMPLETE
+                  Filter Operator
+                    predicate: (str is not null and (str BETWEEN DynamicValue(RS_7_v_str_min) AND DynamicValue(RS_7_v_str_max) and in_bloom_filter(str, DynamicValue(RS_7_v_str_bloom_filter)))) (type: boolean)
+                    Statistics: Num rows: 2000 Data size: 174000 Basic stats: COMPLETE Column stats: COMPLETE
+                    Select Operator
+                      expressions: str (type: string)
+                      outputColumnNames: _col0
+                      Statistics: Num rows: 2000 Data size: 174000 Basic stats: COMPLETE Column stats: COMPLETE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: string)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: string)
+                        Statistics: Num rows: 2000 Data size: 174000 Basic stats: COMPLETE Column stats: COMPLETE
+            Execution mode: llap
+            LLAP IO: all inputs
+        Map 4 
+            Map Operator Tree:
+                TableScan
+                  alias: v
+                  filterExpr: key1 is not null (type: boolean)
+                  Statistics: Num rows: 1000 Data size: 87000 Basic stats: COMPLETE Column stats: PARTIAL
+                  Filter Operator
+                    predicate: key1 is not null (type: boolean)
+                    Statistics: Num rows: 1000 Data size: 87000 Basic stats: COMPLETE Column stats: PARTIAL
+                    Select Operator
+                      expressions: key1 (type: string)
+                      outputColumnNames: _col0
+                      Statistics: Num rows: 1000 Data size: 87000 Basic stats: COMPLETE Column stats: PARTIAL
+                      Reduce Output Operator
+                        key expressions: _col0 (type: string)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: string)
+                        Statistics: Num rows: 1000 Data size: 87000 Basic stats: COMPLETE Column stats: PARTIAL
+                      Select Operator
+                        expressions: _col0 (type: string)
+                        outputColumnNames: _col0
+                        Statistics: Num rows: 1000 Data size: 87000 Basic stats: COMPLETE Column stats: PARTIAL
+                        Group By Operator
+                          aggregations: min(_col0), max(_col0), bloom_filter(_col0, expectedEntries=410)
+                          mode: hash
+                          outputColumnNames: _col0, _col1, _col2
+                          Statistics: Num rows: 1 Data size: 552 Basic stats: COMPLETE Column stats: PARTIAL
+                          Reduce Output Operator
+                            sort order: 
+                            Statistics: Num rows: 1 Data size: 552 Basic stats: COMPLETE Column stats: PARTIAL
+                            value expressions: _col0 (type: string), _col1 (type: string), _col2 (type: binary)
+            Execution mode: llap
+            LLAP IO: all inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Merge Join Operator
+                condition map:
+                     Inner Join 0 to 1
+                keys:
+                  0 _col0 (type: string)
+                  1 _col0 (type: string)
+                Statistics: Num rows: 9756 Data size: 78048 Basic stats: COMPLETE Column stats: PARTIAL
+                Group By Operator
+                  aggregations: count()
+                  mode: hash
+                  outputColumnNames: _col0
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: PARTIAL
+                  Reduce Output Operator
+                    sort order: 
+                    Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: PARTIAL
+                    value expressions: _col0 (type: bigint)
+        Reducer 3 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: count(VALUE._col0)
+                mode: mergepartial
+                outputColumnNames: _col0
+                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: PARTIAL
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: PARTIAL
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+        Reducer 5 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: min(VALUE._col0), max(VALUE._col1), bloom_filter(VALUE._col2, expectedEntries=410)
+                mode: final
+                outputColumnNames: _col0, _col1, _col2
+                Statistics: Num rows: 1 Data size: 552 Basic stats: COMPLETE Column stats: PARTIAL
+                Reduce Output Operator
+                  sort order: 
+                  Statistics: Num rows: 1 Data size: 552 Basic stats: COMPLETE Column stats: PARTIAL
+                  value expressions: _col0 (type: string), _col1 (type: string), _col2 (type: binary)
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: explain select /*+ semi(None)*/ count(*) from srcpart_date k join srcpart_small v on (k.str = v.key1)
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select /*+ semi(None)*/ count(*) from srcpart_date k join srcpart_small v on (k.str = v.key1)
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 4 (SIMPLE_EDGE)
+        Reducer 3 <- Reducer 2 (CUSTOM_SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: k
+                  filterExpr: str is not null (type: boolean)
+                  Statistics: Num rows: 2000 Data size: 174000 Basic stats: COMPLETE Column stats: COMPLETE
+                  Filter Operator
+                    predicate: str is not null (type: boolean)
+                    Statistics: Num rows: 2000 Data size: 174000 Basic stats: COMPLETE Column stats: COMPLETE
+                    Select Operator
+                      expressions: str (type: string)
+                      outputColumnNames: _col0
+                      Statistics: Num rows: 2000 Data size: 174000 Basic stats: COMPLETE Column stats: COMPLETE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: string)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: string)
+                        Statistics: Num rows: 2000 Data size: 174000 Basic stats: COMPLETE Column stats: COMPLETE
+            Execution mode: llap
+            LLAP IO: all inputs
+        Map 4 
+            Map Operator Tree:
+                TableScan
+                  alias: v
+                  filterExpr: key1 is not null (type: boolean)
+                  Statistics: Num rows: 1000 Data size: 87000 Basic stats: COMPLETE Column stats: PARTIAL
+                  Filter Operator
+                    predicate: key1 is not null (type: boolean)
+                    Statistics: Num rows: 1000 Data size: 87000 Basic stats: COMPLETE Column stats: PARTIAL
+                    Select Operator
+                      expressions: key1 (type: string)
+                      outputColumnNames: _col0
+                      Statistics: Num rows: 1000 Data size: 87000 Basic stats: COMPLETE Column stats: PARTIAL
+                      Reduce Output Operator
+                        key expressions: _col0 (type: string)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: string)
+                        Statistics: Num rows: 1000 Data size: 87000 Basic stats: COMPLETE Column stats: PARTIAL
+            Execution mode: llap
+            LLAP IO: all inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Merge Join Operator
+                condition map:
+                     Inner Join 0 to 1
+                keys:
+                  0 _col0 (type: string)
+                  1 _col0 (type: string)
+                Statistics: Num rows: 9756 Data size: 78048 Basic stats: COMPLETE Column stats: PARTIAL
+                Group By Operator
+                  aggregations: count()
+                  mode: hash
+                  outputColumnNames: _col0
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: PARTIAL
+                  Reduce Output Operator
+                    sort order: 
+                    Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: PARTIAL
+                    value expressions: _col0 (type: bigint)
+        Reducer 3 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: count(VALUE._col0)
+                mode: mergepartial
+                outputColumnNames: _col0
+                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: PARTIAL
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: PARTIAL
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: EXPLAIN select  /*+ semi(srcpart_date, str, 5000)*/ count(*) from srcpart_date join srcpart_small v on (srcpart_date.str = v.key1) join alltypesorc_int i on (srcpart_date.value = i.cstring)
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN select  /*+ semi(srcpart_date, str, 5000)*/ count(*) from srcpart_date join srcpart_small v on (srcpart_date.str = v.key1) join alltypesorc_int i on (srcpart_date.value = i.cstring)
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Map 1 <- Reducer 6 (BROADCAST_EDGE)
+        Map 5 <- Reducer 8 (BROADCAST_EDGE)
+        Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 5 (SIMPLE_EDGE)
+        Reducer 3 <- Map 7 (SIMPLE_EDGE), Reducer 2 (SIMPLE_EDGE)
+        Reducer 4 <- Reducer 3 (CUSTOM_SIMPLE_EDGE)
+        Reducer 6 <- Map 5 (CUSTOM_SIMPLE_EDGE)
+        Reducer 8 <- Map 7 (CUSTOM_SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: i
+                  filterExpr: (cstring is not null and (cstring BETWEEN DynamicValue(RS_10_srcpart_date_cstring_min) AND DynamicValue(RS_10_srcpart_date_cstring_max) and in_bloom_filter(cstring, DynamicValue(RS_10_srcpart_date_cstring_bloom_filter)))) (type: boolean)
+                  Statistics: Num rows: 12288 Data size: 862450 Basic stats: COMPLETE Column stats: COMPLETE
+                  Filter Operator
+                    predicate: (cstring is not null and (cstring BETWEEN DynamicValue(RS_10_srcpart_date_cstring_min) AND DynamicValue(RS_10_srcpart_date_cstring_max) and in_bloom_filter(cstring, DynamicValue(RS_10_srcpart_date_cstring_bloom_filter)))) (type: boolean)
+                    Statistics: Num rows: 9174 Data size: 643900 Basic stats: COMPLETE Column stats: COMPLETE
+                    Select Operator
+                      expressions: cstring (type: string)
+                      outputColumnNames: _col0
+                      Statistics: Num rows: 9174 Data size: 643900 Basic stats: COMPLETE Column stats: COMPLETE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: string)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: string)
+                        Statistics: Num rows: 9174 Data size: 643900 Basic stats: COMPLETE Column stats: COMPLETE
+            Execution mode: llap
+            LLAP IO: all inputs
+        Map 5 
+            Map Operator Tree:
+                TableScan
+                  alias: srcpart_date
+                  filterExpr: (str is not null and value is not null and (str BETWEEN DynamicValue(RS_13_v_str_min) AND DynamicValue(RS_13_v_str_max) and in_bloom_filter(str, DynamicValue(RS_13_v_str_bloom_filter)))) (type: boolean)
+                  Statistics: Num rows: 2000 Data size: 356000 Basic stats: COMPLETE Column stats: COMPLETE
+                  Filter Operator
+                    predicate: (str is not null and value is not null and (str BETWEEN DynamicValue(RS_13_v_str_min) AND DynamicValue(RS_13_v_str_max) and in_bloom_filter(str, DynamicValue(RS_13_v_str_bloom_filter)))) (type: boolean)
+                    Statistics: Num rows: 2000 Data size: 356000 Basic stats: COMPLETE Column stats: COMPLETE
+                    Select Operator
+                      expressions: str (type: string), value (type: string)
+                      outputColumnNames: _col0, _col1
+                      Statistics: Num rows: 2000 Data size: 356000 Basic stats: COMPLETE Column stats: COMPLETE
+                      Reduce Output Operator
+                        key expressions: _col1 (type: string)
+                        sort order: +
+                        Map-reduce partition columns: _col1 (type: string)
+                        Statistics: Num rows: 2000 Data size: 356000 Basic stats: COMPLETE Column stats: COMPLETE
+                        value expressions: _col0 (type: string)
+                      Select Operator
+                        expressions: _col1 (type: string)
+                        outputColumnNames: _col0
+                        Statistics: Num rows: 2000 Data size: 182000 Basic stats: COMPLETE Column stats: COMPLETE
+                        Group By Operator
+                          aggregations: min(_col0), max(_col0), bloom_filter(_col0, expectedEntries=5000)
+                          mode: hash
+                          outputColumnNames: _col0, _col1, _col2
+                          Statistics: Num rows: 1 Data size: 552 Basic stats: COMPLETE Column stats: COMPLETE
+                          Reduce Output Operator
+                            sort order: 
+                            Statistics: Num rows: 1 Data size: 552 Basic stats: COMPLETE Column stats: COMPLETE
+                            value expressions: _col0 (type: string), _col1 (type: string), _col2 (type: binary)
+            Execution mode: llap
+            LLAP IO: all inputs
+        Map 7 
+            Map Operator Tree:
+                TableScan
+                  alias: v
+                  filterExpr: key1 is not null (type: boolean)
+                  Statistics: Num rows: 1000 Data size: 87000 Basic stats: COMPLETE Column stats: PARTIAL
+                  Filter Operator
+                    predicate: key1 is not null (type: boolean)
+                    Statistics: Num rows: 1000 Data size: 87000 Basic stats: COMPLETE Column stats: PARTIAL
+                    Select Operator
+                      expressions: key1 (type: string)
+                      outputColumnNames: _col0
+                      Statistics: Num rows: 1000 Data size: 87000 Basic stats: COMPLETE Column stats: PARTIAL
+                      Reduce Output Operator
+                        key expressions: _col0 (type: string)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: string)
+                        Statistics: Num rows: 1000 Data size: 87000 Basic stats: COMPLETE Column stats: PARTIAL
+                      Select Operator
+                        expressions: _col0 (type: string)
+                        outputColumnNames: _col0
+                        Statistics: Num rows: 1000 Data size: 87000 Basic stats: COMPLETE Column stats: PARTIAL
+                        Group By Operator
+                          aggregations: min(_col0), max(_col0), bloom_filter(_col0, expectedEntries=410)
+                          mode: hash
+                          outputColumnNames: _col0, _col1, _col2
+                          Statistics: Num rows: 1 Data size: 552 Basic stats: COMPLETE Column stats: PARTIAL
+                          Reduce Output Operator
+                            sort order: 
+                            Statistics: Num rows: 1 Data size: 552 Basic stats: COMPLETE Column stats: PARTIAL
+                            value expressions: _col0 (type: string), _col1 (type: string), _col2 (type: binary)
+            Execution mode: llap
+            LLAP IO: all inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Merge Join Operator
+                condition map:
+                     Inner Join 0 to 1
+                keys:
+                  0 _col0 (type: string)
+                  1 _col1 (type: string)
+                outputColumnNames: _col1
+                Statistics: Num rows: 3281 Data size: 285447 Basic stats: COMPLETE Column stats: COMPLETE
+                Reduce Output Operator
+                  key expressions: _col1 (type: string)
+                  sort order: +
+                  Map-reduce partition columns: _col1 (type: string)
+                  Statistics: Num rows: 3281 Data size: 285447 Basic stats: COMPLETE Column stats: COMPLETE
+        Reducer 3 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Merge Join Operator
+                condition map:
+                     Inner Join 0 to 1
+                keys:
+                  0 _col1 (type: string)
+                  1 _col0 (type: string)
+                Statistics: Num rows: 16004 Data size: 128032 Basic stats: COMPLETE Column stats: PARTIAL
+                Group By Operator
+                  aggregations: count()
+                  mode: hash
+                  outputColumnNames: _col0
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: PARTIAL
+                  Reduce Output Operator
+                    sort order: 
+                    Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: PARTIAL
+                    value expressions: _col0 (type: bigint)
+        Reducer 4 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: count(VALUE._col0)
+                mode: mergepartial
+                outputColumnNames: _col0
+                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: PARTIAL
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: PARTIAL
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+        Reducer 6 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: min(VALUE._col0), max(VALUE._col1), bloom_filter(VALUE._col2, expectedEntries=5000)
+                mode: final
+                outputColumnNames: _col0, _col1, _col2
+                Statistics: Num rows: 1 Data size: 552 Basic stats: COMPLETE Column stats: COMPLETE
+                Reduce Output Operator
+                  sort order: 
+                  Statistics: Num rows: 1 Data size: 552 Basic stats: COMPLETE Column stats: COMPLETE
+                  value expressions: _col0 (type: string), _col1 (type: string), _col2 (type: binary)
+        Reducer 8 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: min(VALUE._col0), max(VALUE._col1), bloom_filter(VALUE._col2, expectedEntries=410)
+                mode: final
+                outputColumnNames: _col0, _col1, _col2
+                Statistics: Num rows: 1 Data size: 552 Basic stats: COMPLETE Column stats: PARTIAL
+                Reduce Output Operator
+                  sort order: 
+                  Statistics: Num rows: 1 Data size: 552 Basic stats: COMPLETE Column stats: PARTIAL
+                  value expressions: _col0 (type: string), _col1 (type: string), _col2 (type: binary)
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: EXPLAIN select  /*+ semi(i, 3000)*/ count(*) from srcpart_date k join srcpart_small v on (k.str = v.key1) join alltypesorc_int i on (v.key1 = i.cstring)
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN select  /*+ semi(i, 3000)*/ count(*) from srcpart_date k join srcpart_small v on (k.str = v.key1) join alltypesorc_int i on (v.key1 = i.cstring)
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Map 5 <- Reducer 4 (BROADCAST_EDGE)
+        Map 6 <- Reducer 4 (BROADCAST_EDGE)
+        Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 5 (SIMPLE_EDGE), Map 6 (SIMPLE_EDGE)
+        Reducer 3 <- Reducer 2 (CUSTOM_SIMPLE_EDGE)
+        Reducer 4 <- Map 1 (CUSTOM_SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: i
+                  filterExpr: cstring is not null (type: boolean)
+                  Statistics: Num rows: 12288 Data size: 862450 Basic stats: COMPLETE Column stats: COMPLETE
+                  Filter Operator
+                    predicate: cstring is not null (type: boolean)
+                    Statistics: Num rows: 9174 Data size: 643900 Basic stats: COMPLETE Column stats: COMPLETE
+                    Select Operator
+                      expressions: cstring (type: string)
+                      outputColumnNames: _col0
+                      Statistics: Num rows: 9174 Data size: 643900 Basic stats: COMPLETE Column stats: COMPLETE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: string)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: string)
+                        Statistics: Num rows: 9174 Data size: 643900 Basic stats: COMPLETE Column stats: COMPLETE
+                      Select Operator
+                        expressions: _col0 (type: string)
+                        outputColumnNames: _col0
+                        Statistics: Num rows: 9174 Data size: 643900 Basic stats: COMPLETE Column stats: COMPLETE
+                        Group By Operator
+                          aggregations: min(_col0), max(_col0), bloom_filter(_col0, expectedEntries=3000)
+                          mode: hash
+                          outputColumnNames: _col0, _col1, _col2
+                          Statistics: Num rows: 1 Data size: 552 Basic stats: COMPLETE Column stats: COMPLETE
+                          Reduce Output Operator
+                            sort order: 
+                            Statistics: Num rows: 1 Data size: 552 Basic stats: COMPLETE Column stats: COMPLETE
+                            value expressions: _col0 (type: string), _col1 (type: string), _col2 (type: binary)
+            Execution mode: llap
+            LLAP IO: all inputs
+        Map 5 
+            Map Operator Tree:
+                TableScan
+                  alias: v
+                  filterExpr: (key1 is not null and (key1 BETWEEN DynamicValue(RS_9_i_key1_min) AND DynamicValue(RS_9_i_key1_max) and in_bloom_filter(key1, DynamicValue(RS_9_i_key1_bloom_filter)))) (type: boolean)
+                  Statistics: Num rows: 1000 Data size: 87000 Basic stats: COMPLETE Column stats: PARTIAL
+                  Filter Operator
+                    predicate: (key1 is not null and (key1 BETWEEN DynamicValue(RS_9_i_key1_min) AND DynamicValue(RS_9_i_key1_max) and in_bloom_filter(key1, DynamicValue(RS_9_i_key1_bloom_filter)))) (type: boolean)
+                    Statistics: Num rows: 1000 Data size: 87000 Basic stats: COMPLETE Column stats: PARTIAL
+                    Select Operator
+                      expressions: key1 (type: string)
+                      outputColumnNames: _col0
+                      Statistics: Num rows: 1000 Data size: 87000 Basic stats: COMPLETE Column stats: PARTIAL
+                      Reduce Output Operator
+                        key expressions: _col0 (type: string)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: string)
+                        Statistics: Num rows: 1000 Data size: 87000 Basic stats: COMPLETE Column stats: PARTIAL
+            Execution mode: llap
+            LLAP IO: all inputs
+        Map 6 
+            Map Operator Tree:
+                TableScan
+                  alias: k
+                  filterExpr: (str is not null and (str BETWEEN DynamicValue(RS_9_i_str_min) AND DynamicValue(RS_9_i_str_max) and in_bloom_filter(str, DynamicValue(RS_9_i_str_bloom_filter)))) (type: boolean)
+                  Statistics: Num rows: 2000 Data size: 174000 Basic stats: COMPLETE Column stats: COMPLETE
+                  Filter Operator
+                    predicate: (str is not null and (str BETWEEN DynamicValue(RS_9_i_str_min) AND DynamicValue(RS_9_i_str_max) and in_bloom_filter(str, DynamicValue(RS_9_i_str_bloom_filter)))) (type: boolean)
+                    Statistics: Num rows: 2000 Data size: 174000 Basic stats: COMPLETE Column stats: COMPLETE
+                    Select Operator
+                      expressions: str (type: string)
+                      outputColumnNames: _col0
+                      Statistics: Num rows: 2000 Data size: 174000 Basic stats: COMPLETE Column stats: COMPLETE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: string)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: string)
+                        Statistics: Num rows: 2000 Data size: 174000 Basic stats: COMPLETE Column stats: COMPLETE
+            Execution mode: llap
+            LLAP IO: all inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Merge Join Operator
+                condition map:
+                     Inner Join 0 to 1
+                     Inner Join 1 to 2
+                keys:
+                  0 _col0 (type: string)
+                  1 _col0 (type: string)
+                  2 _col0 (type: string)
+                Statistics: Num rows: 16008 Data size: 128064 Basic stats: COMPLETE Column stats: PARTIAL
+                Group By Operator
+                  aggregations: count()
+                  mode: hash
+                  outputColumnNames: _col0
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: PARTIAL
+                  Reduce Output Operator
+                    sort order: 
+                    Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: PARTIAL
+                    value expressions: _col0 (type: bigint)
+        Reducer 3 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: count(VALUE._col0)
+                mode: mergepartial
+                outputColumnNames: _col0
+                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: PARTIAL
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: PARTIAL
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+        Reducer 4 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: min(VALUE._col0), max(VALUE._col1), bloom_filter(VALUE._col2, expectedEntries=3000)
+                mode: final
+                outputColumnNames: _col0, _col1, _col2
+                Statistics: Num rows: 1 Data size: 552 Basic stats: COMPLETE Column stats: COMPLETE
+                Reduce Output Operator
+                  sort order: 
+                  Statistics: Num rows: 1 Data size: 552 Basic stats: COMPLETE Column stats: COMPLETE
+                  value expressions: _col0 (type: string), _col1 (type: string), _col2 (type: binary)
+                Reduce Output Operator
+                  sort order: 
+                  Statistics: Num rows: 1 Data size: 552 Basic stats: COMPLETE Column stats: COMPLETE
+                  value expressions: _col0 (type: string), _col1 (type: string), _col2 (type: binary)
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: explain select /*+ semi(k, str, 5000)*/ count(*) from srcpart_date k join srcpart_small v on (k.str = v.key1)
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select /*+ semi(k, str, 5000)*/ count(*) from srcpart_date k join srcpart_small v on (k.str = v.key1)
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Map 5 <- Reducer 4 (BROADCAST_EDGE)
+        Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 5 (SIMPLE_EDGE)
+        Reducer 3 <- Reducer 2 (CUSTOM_SIMPLE_EDGE)
+        Reducer 4 <- Map 1 (CUSTOM_SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: k
+                  filterExpr: str is not null (type: boolean)
+                  Statistics: Num rows: 2000 Data size: 174000 Basic stats: COMPLETE Column stats: COMPLETE
+                  Filter Operator
+                    predicate: str is not null (type: boolean)
+                    Statistics: Num rows: 2000 Data size: 174000 Basic stats: COMPLETE Column stats: COMPLETE
+                    Select Operator
+                      expressions: str (type: string)
+                      outputColumnNames: _col0
+                      Statistics: Num rows: 2000 Data size: 174000 Basic stats: COMPLETE Column stats: COMPLETE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: string)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: string)
+                        Statistics: Num rows: 2000 Data size: 174000 Basic stats: COMPLETE Column stats: COMPLETE
+                      Select Operator
+                        expressions: _col0 (type: string)
+                        outputColumnNames: _col0
+                        Statistics: Num rows: 2000 Data size: 174000 Basic stats: COMPLETE Column stats: COMPLETE
+                        Group By Operator
+                          aggregations: min(_col0), max(_col0), bloom_filter(_col0, expectedEntries=5000)
+                          mode: hash
+                          outputColumnNames: _col0, _col1, _col2
+                          Statistics: Num rows: 1 Data size: 552 Basic stats: COMPLETE Column stats: COMPLETE
+                          Reduce Output Operator
+                            sort order: 
+                            Statistics: Num rows: 1 Data size: 552 Basic stats: COMPLETE Column stats: COMPLETE
+                            value expressions: _col0 (type: string), _col1 (type: string), _col2 (type: binary)
+            Execution mode: llap
+            LLAP IO: all inputs
+        Map 5 
+            Map Operator Tree:
+                TableScan
+                  alias: v
+                  filterExpr: (key1 is not null and (key1 BETWEEN DynamicValue(RS_6_k_key1_min) AND DynamicValue(RS_6_k_key1_max) and in_bloom_filter(key1, DynamicValue(RS_6_k_key1_bloom_filter)))) (type: boolean)
+                  Statistics: Num rows: 1000 Data size: 87000 Basic stats: COMPLETE Column stats: PARTIAL
+                  Filter Operator
+                    predicate: (key1 is not null and (key1 BETWEEN DynamicValue(RS_6_k_key1_min) AND DynamicValue(RS_6_k_key1_max) and in_bloom_filter(key1, DynamicValue(RS_6_k_key1_bloom_filter)))) (type: boolean)
+                    Statistics: Num rows: 1000 Data size: 87000 Basic stats: COMPLETE Column stats: PARTIAL
+                    Select Operator
+                      expressions: key1 (type: string)
+                      outputColumnNames: _col0
+                      Statistics: Num rows: 1000 Data size: 87000 Basic stats: COMPLETE Column stats: PARTIAL
+                      Reduce Output Operator
+                        key expressions: _col0 (type: string)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: string)
+                        Statistics: Num rows: 1000 Data size: 87000 Basic stats: COMPLETE Column stats: PARTIAL
+            Execution mode: llap
+            LLAP IO: all inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Merge Join Operator
+                condition map:
+                     Inner Join 0 to 1
+                keys:
+                  0 _col0 (type: string)
+                  1 _col0 (type: string)
+                Statistics: Num rows: 9756 Data size: 78048 Basic stats: COMPLETE Column stats: PARTIAL
+                Group By Operator
+                  aggregations: count()
+                  mode: hash
+                  outputColumnNames: _col0
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: PARTIAL
+                  Reduce Output Operator
+                    sort order: 
+                    Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: PARTIAL
+                    value expressions: _col0 (type: bigint)
+        Reducer 3 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: count(VALUE._col0)
+                mode: mergepartial
+                outputColumnNames: _col0
+                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: PARTIAL
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: PARTIAL
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+        Reducer 4 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: min(VALUE._col0), max(VALUE._col1), bloom_filter(VALUE._col2, expectedEntries=5000)
+                mode: final
+                outputColumnNames: _col0, _col1, _col2
+                Statistics: Num rows: 1 Data size: 552 Basic stats: COMPLETE Column stats: COMPLETE
+                Reduce Output Operator
+                  sort order: 
+                  Statistics: Num rows: 1 Data size: 552 Basic stats: COMPLETE Column stats: COMPLETE
+                  value expressions: _col0 (type: string), _col1 (type: string), _col2 (type: binary)
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: explain select /*+ semi(k, str, 5000)*/ count(*) from srcpart_date k join srcpart_small s on (k.str = s.key1)
+        union all
+        select /*+ semi(v, 5000)*/ count(*) from srcpart_date d join srcpart_small v on (d.str = v.key1)
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select /*+ semi(k, str, 5000)*/ count(*) from srcpart_date k join srcpart_small s on (k.str = s.key1)
+        union all
+        select /*+ semi(v, 5000)*/ count(*) from srcpart_date d join srcpart_small v on (d.str = v.key1)
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Map 6 <- Reducer 5 (BROADCAST_EDGE)
+        Map 7 <- Reducer 11 (BROADCAST_EDGE)
+        Reducer 11 <- Map 10 (CUSTOM_SIMPLE_EDGE)
+        Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 6 (SIMPLE_EDGE)
+        Reducer 3 <- Reducer 2 (CUSTOM_SIMPLE_EDGE), Union 4 (CONTAINS)
+        Reducer 5 <- Map 1 (CUSTOM_SIMPLE_EDGE)
+        Reducer 8 <- Map 10 (SIMPLE_EDGE), Map 7 (SIMPLE_EDGE)
+        Reducer 9 <- Reducer 8 (CUSTOM_SIMPLE_EDGE), Union 4 (CONTAINS)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: k
+                  filterExpr: str is not null (type: boolean)
+                  Statistics: Num rows: 2000 Data size: 174000 Basic stats: COMPLETE Column stats: COMPLETE
+                  Filter Operator
+                    predicate: str is not null (type: boolean)
+                    Statistics: Num rows: 2000 Data size: 174000 Basic stats: COMPLETE Column stats: COMPLETE
+                    Reduce Output Operator
+                      key expressions: str (type: string)
+                      sort order: +
+                      Map-reduce partition columns: str (type: string)
+                      Statistics: Num rows: 2000 Data size: 174000 Basic stats: COMPLETE Column stats: COMPLETE
+                    Select Operator
+                      expressions: str (type: string)
+                      outputColumnNames: _col0
+                      Statistics: Num rows: 2000 Data size: 174000 Basic stats: COMPLETE Column stats: COMPLETE
+                      Group By Operator
+                        aggregations: min(_col0), max(_col0), bloom_filter(_col0, expectedEntries=5000)
+                        mode: hash
+                        outputColumnNames: _col0, _col1, _col2
+                        Statistics: Num rows: 1 Data size: 552 Basic stats: COMPLETE Column stats: COMPLETE
+                        Reduce Output Operator
+                          sort order: 
+                          Statistics: Num rows: 1 Data size: 552 Basic stats: COMPLETE Column stats: COMPLETE
+                          value expressions: _col0 (type: string), _col1 (type: string), _col2 (type: binary)
+            Execution mode: llap
+            LLAP IO: all inputs
+        Map 10 
+            Map Operator Tree:
+                TableScan
+                  alias: v
+                  filterExpr: key1 is not null (type: boolean)
+                  Statistics: Num rows: 1000 Data size: 87000 Basic stats: COMPLETE Column stats: PARTIAL
+                  Filter Operator
+                    predicate: key1 is not null (type: boolean)
+                    Statistics: Num rows: 1000 Data size: 87000 Basic stats: COMPLETE Column stats: PARTIAL
+                    Reduce Output Operator
+                      key expressions: key1 (type: string)
+                      sort order: +
+                      Map-reduce partition columns: key1 (type: string)
+                      Statistics: Num rows: 1000 Data size: 87000 Basic stats: COMPLETE Column stats: PARTIAL
+                    Select Operator
+                      expressions: key1 (type: string)
+                      outputColumnNames: _col0
+                      Statistics: Num rows: 1000 Data size: 87000 Basic stats: COMPLETE Column stats: PARTIAL
+                      Group By Operator
+                        aggregations: min(_col0), max(_col0), bloom_filter(_col0, expectedEntries=5000)
+                        mode: hash
+                        outputColumnNames: _col0, _col1, _col2
+                        Statistics: Num rows: 1 Data size: 552 Basic stats: COMPLETE Column stats: PARTIAL
+                        Reduce Output Operator
+                          sort order: 
+                          Statistics: Num rows: 1 Data size: 552 Basic stats: COMPLETE Column stats: PARTIAL
+                          value expressions: _col0 (type: string), _col1 (type: string), _col2 (type: binary)
+            Execution mode: llap
+            LLAP IO: all inputs
+        Map 6 
+            Map Operator Tree:
+                TableScan
+                  alias: s
+                  filterExpr: (key1 is not null and (key1 BETWEEN DynamicValue(RS_3_k_key1_min) AND DynamicValue(RS_3_k_key1_max) and in_bloom_filter(key1, DynamicValue(RS_3_k_key1_bloom_filter)))) (type: boolean)
+                  Statistics: Num rows: 1000 Data size: 87000 Basic stats: COMPLETE Column stats: PARTIAL
+                  Filter Operator
+                    predicate: (key1 is not null and (key1 BETWEEN DynamicValue(RS_3_k_key1_min) AND DynamicValue(RS_3_k_key1_max) and in_bloom_filter(key1, DynamicValue(RS_3_k_key1_bloom_filter)))) (type: boolean)
+                    Statistics: Num rows: 1000 Data size: 87000 Basic stats: COMPLETE Column stats: PARTIAL
+                    Reduce Output Operator
+                      key expressions: key1 (type: string)
+                      sort order: +
+                      Map-reduce partition columns: key1 (type: string)
+                      Statistics: Num rows: 1000 Data size: 87000 Basic stats: COMPLETE Column stats: PARTIAL
+            Execution mode: llap
+            LLAP IO: all inputs
+        Map 7 
+            Map Operator Tree:
+                TableScan
+                  alias: d
+                  filterExpr: (str is not null and (str BETWEEN DynamicValue(RS_17_v_str_min) AND DynamicValue(RS_17_v_str_max) and in_bloom_filter(str, DynamicValue(RS_17_v_str_bloom_filter)))) (type: boolean)
+                  Statistics: Num rows: 2000 Data size: 174000 Basic stats: COMPLETE Column stats: COMPLETE
+                  Filter Operator
+                    predicate: (str is not null and (str BETWEEN DynamicValue(RS_17_v_str_min) AND DynamicValue(RS_17_v_str_max) and in_bloom_filter(str, DynamicValue(RS_17_v_str_bloom_filter)))) (type: boolean)
+                    Statistics: Num rows: 2000 Data size: 174000 Basic stats: COMPLETE Column stats: COMPLETE
+                    Reduce Output Operator
+                      key expressions: str (type: string)
+                      sort order: +
+                      Map-reduce partition columns: str (type: string)
+                      Statistics: Num rows: 2000 Data size: 174000 Basic stats: COMPLETE Column stats: COMPLETE
+            Execution mode: llap
+            LLAP IO: all inputs
+        Reducer 11 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: min(VALUE._col0), max(VALUE._col1), bloom_filter(VALUE._col2, expectedEntries=5000)
+                mode: final
+                outputColumnNames: _col0, _col1, _col2
+                Statistics: Num rows: 1 Data size: 552 Basic stats: COMPLETE Column stats: PARTIAL
+                Reduce Output Operator
+                  sort order: 
+                  Statistics: Num rows: 1 Data size: 552 Basic stats: COMPLETE Column stats: PARTIAL
+                  value expressions: _col0 (type: string), _col1 (type: string), _col2 (type: binary)
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Merge Join Operator
+                condition map:
+                     Inner Join 0 to 1
+                keys:
+                  0 str (type: string)
+                  1 key1 (type: string)
+                Statistics: Num rows: 9756 Data size: 78048 Basic stats: COMPLETE Column stats: PARTIAL
+                Group By Operator
+                  aggregations: count()
+                  mode: hash
+                  outputColumnNames: _col0
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: PARTIAL
+                  Reduce Output Operator
+                    sort order: 
+                    Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: PARTIAL
+                    value expressions: _col0 (type: bigint)
+        Reducer 3 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: count(VALUE._col0)
+                mode: mergepartial
+                outputColumnNames: _col0
+                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: PARTIAL
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 2 Data size: 16 Basic stats: COMPLETE Column stats: PARTIAL
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+        Reducer 5 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: min(VALUE._col0), max(VALUE._col1), bloom_filter(VALUE._col2, expectedEntries=5000)
+                mode: final
+                outputColumnNames: _col0, _col1, _col2
+                Statistics: Num rows: 1 Data size: 552 Basic stats: COMPLETE Column stats: COMPLETE
+                Reduce Output Operator
+                  sort order: 
+                  Statistics: Num rows: 1 Data size: 552 Basic stats: COMPLETE Column stats: COMPLETE
+                  value expressions: _col0 (type: string), _col1 (type: string), _col2 (type: binary)
+        Reducer 8 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Merge Join Operator
+                condition map:
+                     Inner Join 0 to 1
+                keys:
+                  0 str (type: string)
+                  1 key1 (type: string)
+                Statistics: Num rows: 9756 Data size: 78048 Basic stats: COMPLETE Column stats: PARTIAL
+                Group By Operator
+                  aggregations: count()
+                  mode: hash
+                  outputColumnNames: _col0
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: PARTIAL
+                  Reduce Output Operator
+                    sort order: 
+                    Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: PARTIAL
+                    value expressions: _col0 (type: bigint)
+        Reducer 9 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: count(VALUE._col0)
+                mode: mergepartial
+                outputColumnNames: _col0
+                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: PARTIAL
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 2 Data size: 16 Basic stats: COMPLETE Column stats: PARTIAL
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+        Union 4 
+            Vertex: Union 4
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: explain select count(*) from srcpart_date k join srcpart_small v on (k.str = v.key1)
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select count(*) from srcpart_date k join srcpart_small v on (k.str = v.key1)
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Map 1 <- Reducer 5 (BROADCAST_EDGE)
+        Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 4 (SIMPLE_EDGE)
+        Reducer 3 <- Reducer 2 (CUSTOM_SIMPLE_EDGE)
+        Reducer 5 <- Map 4 (CUSTOM_SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: k
+                  filterExpr: (str is not null and (str BETWEEN DynamicValue(RS_5_v_str_min) AND DynamicValue(RS_5_v_str_max) and in_bloom_filter(str, DynamicValue(RS_5_v_str_bloom_filter)))) (type: boolean)
+                  Statistics: Num rows: 2000 Data size: 174000 Basic stats: COMPLETE Column stats: COMPLETE
+                  Filter Operator
+                    predicate: (str is not null and (str BETWEEN DynamicValue(RS_5_v_str_min) AND DynamicValue(RS_5_v_str_max) and in_bloom_filter(str, DynamicValue(RS_5_v_str_bloom_filter)))) (type: boolean)
+                    Statistics: Num rows: 2000 Data size: 174000 Basic stats: COMPLETE Column stats: COMPLETE
+                    Reduce Output Operator
+                      key expressions: str (type: string)
+                      sort order: +
+                      Map-reduce partition columns: str (type: string)
+                      Statistics: Num rows: 2000 Data size: 174000 Basic stats: COMPLETE Column stats: COMPLETE
+            Execution mode: llap
+            LLAP IO: all inputs
+        Map 4 
+            Map Operator Tree:
+                TableScan
+                  alias: v
+                  filterExpr: key1 is not null (type: boolean)
+                  Statistics: Num rows: 1000 Data size: 87000 Basic stats: COMPLETE Column stats: PARTIAL
+                  Filter Operator
+                    predicate: key1 is not null (type: boolean)
+                    Statistics: Num rows: 1000 Data size: 87000 Basic stats: COMPLETE Column stats: PARTIAL
+                    Reduce Output Operator
+                      key expressions: key1 (type: string)
+                      sort order: +
+                      Map-reduce partition columns: key1 (type: string)
+                      Statistics: Num rows: 1000 Data size: 87000 Basic stats: COMPLETE Column stats: PARTIAL
+                    Select Operator
+                      expressions: key1 (type: string)
+                      outputColumnNames: _col0
+                      Statistics: Num rows: 1000 Data size: 87000 Basic stats: COMPLETE Column stats: PARTIAL
+                      Group By Operator
+                        aggregations: min(_col0), max(_col0), bloom_filter(_col0, expectedEntries=410)
+                        mode: hash
+                        outputColumnNames: _col0, _col1, _col2
+                        Statistics: Num rows: 1 Data size: 552 Basic stats: COMPLETE Column stats: PARTIAL
+                        Reduce Output Operator
+                          sort order: 
+                          Statistics: Num rows: 1 Data size: 552 Basic stats: COMPLETE Column stats: PARTIAL
+                          value expressions: _col0 (type: string), _col1 (type: string), _col2 (type: binary)
+            Execution mode: llap
+            LLAP IO: all inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Merge Join Operator
+                condition map:
+                     Inner Join 0 to 1
+                keys:
+                  0 str (type: string)
+                  1 key1 (type: string)
+                Statistics: Num rows: 9756 Data size: 78048 Basic stats: COMPLETE Column stats: PARTIAL
+                Group By Operator
+                  aggregations: count()
+                  mode: hash
+                  outputColumnNames: _col0
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: PARTIAL
+                  Reduce Output Operator
+                    sort order: 
+                    Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: PARTIAL
+                    value expressions: _col0 (type: bigint)
+        Reducer 3 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: count(VALUE._col0)
+                mode: mergepartial
+                outputColumnNames: _col0
+                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: PARTIAL
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: PARTIAL
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+        Reducer 5 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: min(VALUE._col0), max(VALUE._col1), bloom_filter(VALUE._col2, expectedEntries=410)
+                mode: final
+                outputColumnNames: _col0, _col1, _col2
+                Statistics: Num rows: 1 Data size: 552 Basic stats: COMPLETE Column stats: PARTIAL
+                Reduce Output Operator
+                  sort order: 
+                  Statistics: Num rows: 1 Data size: 552 Basic stats: COMPLETE Column stats: PARTIAL
+                  value expressions: _col0 (type: string), _col1 (type: string), _col2 (type: binary)
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: explain select /*+ semi(None)*/ count(*) from srcpart_date k join srcpart_small v on (k.str = v.key1)
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select /*+ semi(None)*/ count(*) from srcpart_date k join srcpart_small v on (k.str = v.key1)
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 4 (SIMPLE_EDGE)
+        Reducer 3 <- Reducer 2 (CUSTOM_SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: k
+                  filterExpr: str is not null (type: boolean)
+                  Statistics: Num rows: 2000 Data size: 174000 Basic stats: COMPLETE Column stats: COMPLETE
+                  Filter Operator
+                    predicate: str is not null (type: boolean)
+                    Statistics: Num rows: 2000 Data size: 174000 Basic stats: COMPLETE Column stats: COMPLETE
+                    Reduce Output Operator
+                      key expressions: str (type: string)
+                      sort order: +
+                      Map-reduce partition columns: str (type: string)
+                      Statistics: Num rows: 2000 Data size: 174000 Basic stats: COMPLETE Column stats: COMPLETE
+            Execution mode: llap
+            LLAP IO: all inputs
+        Map 4 
+            Map Operator Tree:
+                TableScan
+                  alias: v
+                  filterExpr: key1 is not null (type: boolean)
+                  Statistics: Num rows: 1000 Data size: 87000 Basic stats: COMPLETE Column stats: PARTIAL
+                  Filter Operator
+                    predicate: key1 is not null (type: boolean)
+                    Statistics: Num rows: 1000 Data size: 87000 Basic stats: COMPLETE Column stats: PARTIAL
+                    Reduce Output Operator
+                      key expressions: key1 (type: string)
+                      sort order: +
+                      Map-reduce partition columns: key1 (type: string)
+                      Statistics: Num rows: 1000 Data size: 87000 Basic stats: COMPLETE Column stats: PARTIAL
+            Execution mode: llap
+            LLAP IO: all inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Merge Join Operator
+                condition map:
+                     Inner Join 0 to 1
+                keys:
+                  0 str (type: string)
+                  1 key1 (type: string)
+                Statistics: Num rows: 9756 Data size: 78048 Basic stats: COMPLETE Column stats: PARTIAL
+                Group By Operator
+                  aggregations: count()
+                  mode: hash
+                  outputColumnNames: _col0
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: PARTIAL
+                  Reduce Output Operator
+                    sort order: 
+                    Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: PARTIAL
+                    value expressions: _col0 (type: bigint)
+        Reducer 3 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: count(VALUE._col0)
+                mode: mergepartial
+       

<TRUNCATED>

[21/50] [abbrv] hive git commit: HIVE-16275: Vectorization: Add ReduceSink support for TopN (in specialized native classes) (Matt McCline, reviewed by Gopal Vijayaraghavan)

Posted by we...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/ec8c390e/ql/src/test/results/clientpositive/vector_outer_join3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_outer_join3.q.out b/ql/src/test/results/clientpositive/vector_outer_join3.q.out
index d97ff1d..9b93644 100644
--- a/ql/src/test/results/clientpositive/vector_outer_join3.q.out
+++ b/ql/src/test/results/clientpositive/vector_outer_join3.q.out
@@ -242,7 +242,7 @@ left outer join small_alltypesorc_a hd
   on hd.cstring1 = c.cstring1
 ) t1
 POSTHOOK: type: QUERY
-{"PLAN VECTORIZATION":{"enabled":true,"enabledConditionsMet":["hive.vectorized.execution.enabled IS true"]},"STAGE DEPENDENCIES":{"Stage-8":{"ROOT STAGE":"TRUE"},"Stage-3":{"DEPENDENT STAGES":"Stage-8"},"Stage-0":{"DEPENDENT STAGES":"Stage-3"}},"STAGE PLANS":{"Stage-8":{"Map Reduce Local Work":{"Alias -> Map Local Tables:":{"$hdt$_1:cd":{"Fetch Operator":{"limit:":"-1"}},"$hdt$_2:hd":{"Fetch Operator":{"limit:":"-1"}}},"Alias -> Map Local Operator Tree:":{"$hdt$_1:cd":{"TableScan":{"alias:":"cd","Statistics:":"Num rows: 20 Data size: 4400 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"TS_2","children":{"Select Operator":{"expressions:":"cint (type: int)","outputColumnNames:":["_col0"],"Statistics:":"Num rows: 20 Data size: 4400 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"SEL_3","children":{"HashTable Sink Operator":{"keys:":{"0":"_col0 (type: int)","1":"_col0 (type: int)"},"OperatorId:":"HASHTABLESINK_26"}}}}}},"$hdt$_2:hd":{"TableScan":{"alias:":"hd","Statist
 ics:":"Num rows: 20 Data size: 4400 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"TS_4","children":{"Select Operator":{"expressions:":"cstring1 (type: string)","outputColumnNames:":["_col0"],"Statistics:":"Num rows: 20 Data size: 4400 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"SEL_5","children":{"HashTable Sink Operator":{"keys:":{"0":"_col1 (type: string)","1":"_col0 (type: string)"},"OperatorId:":"HASHTABLESINK_24"}}}}}}}}},"Stage-3":{"Map Reduce":{"Map Operator Tree:":[{"TableScan":{"alias:":"c","Statistics:":"Num rows: 20 Data size: 4400 Basic stats: COMPLETE Column stats: NONE","TableScan Vectorization:":{"native:":"true","projectedOutputColumns:":"[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]"},"OperatorId:":"TS_0","children":{"Select Operator":{"expressions:":"cint (type: int), cstring1 (type: string)","outputColumnNames:":["_col0","_col1"],"Select Vectorization:":{"className:":"VectorSelectOperator","native:":"true","projectedOutputColumns:":"[2, 6]"},"Stat
 istics:":"Num rows: 20 Data size: 4400 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"SEL_1","children":{"Map Join Operator":{"condition map:":[{"":"Left Outer Join0 to 1"}],"keys:":{"0":"_col0 (type: int)","1":"_col0 (type: int)"},"Map Join Vectorization:":{"className:":"VectorMapJoinOperator","native:":"false","nativeConditionsMet:":["hive.mapjoin.optimized.hashtable IS true","hive.vectorized.execution.mapjoin.native.enabled IS true","One MapJoin Condition IS true","No nullsafe IS true","Small table vectorizes IS true","Optimized Table and Supports Key Types IS true"],"nativeConditionsNotMet:":["hive.execution.engine mr IN [tez, spark] IS false"]},"outputColumnNames:":["_col1"],"Statistics:":"Num rows: 22 Data size: 4840 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"MAPJOIN_23","children":{"Map Join Operator":{"condition map:":[{"":"Left Outer Join0 to 1"}],"keys:":{"0":"_col1 (type: string)","1":"_col0 (type: string)"},"Map Join Vectorization:":{"className:":
 "VectorMapJoinOperator","native:":"false","nativeConditionsMet:":["hive.mapjoin.optimized.hashtable IS true","hive.vectorized.execution.mapjoin.native.enabled IS true","One MapJoin Condition IS true","No nullsafe IS true","Small table vectorizes IS true","Optimized Table and Supports Key Types IS true"],"nativeConditionsNotMet:":["hive.execution.engine mr IN [tez, spark] IS false"]},"Statistics:":"Num rows: 24 Data size: 5324 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"MAPJOIN_22","children":{"Group By Operator":{"aggregations:":["count()"],"Group By Vectorization:":{"aggregators:":["VectorUDAFCountStar(*) -> bigint"],"className:":"VectorGroupByOperator","vectorOutput:":"true","native:":"false","projectedOutputColumns:":"[0]"},"mode:":"hash","outputColumnNames:":["_col0"],"Statistics:":"Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"GBY_13","children":{"Reduce Output Operator":{"sort order:":"","Reduce Sink Vectorization:":{"className:
 ":"VectorReduceSinkOperator","native:":"false","nativeConditionsMet:":["hive.vectorized.execution.reducesink.new.enabled IS true","No TopN IS true","No DISTINCT columns IS true","BinarySortableSerDe for keys IS true","LazyBinarySerDe for values IS true"],"nativeConditionsNotMet:":["hive.execution.engine mr IN [tez, spark] IS false"]},"Statistics:":"Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE","value expressions:":"_col0 (type: bigint)","OperatorId:":"RS_14","OutputOperators:":"[GBY_15]"}}}}}}}}}}}}],"Execution mode:":"vectorized","Map Vectorization:":{"enabled:":"true","enabledConditionsMet:":["hive.vectorized.use.vectorized.input.format IS true"],"groupByVectorOutput:":"true","inputFileFormats:":["org.apache.hadoop.hive.ql.io.orc.OrcInputFormat"],"allNative:":"false","usesVectorUDFAdaptor:":"false","vectorized:":"true","rowBatchContext:":{"dataColumnCount:":"12","includeColumns:":"[2, 6]","dataColumns:":["ctinyint:tinyint","csmallint:smallint","cint:int","cbig
 int:bigint","cfloat:float","cdouble:double","cstring1:string","cstring2:string","ctimestamp1:timestamp","ctimestamp2:timestamp","cboolean1:boolean","cboolean2:boolean"],"partitionColumnCount:":"0"}},"Local Work:":{"Map Reduce Local Work":{}},"Reduce Vectorization:":{"enabled:":"false","enableConditionsMet:":["hive.vectorized.execution.reduce.enabled IS true"],"enableConditionsNotMet:":["hive.execution.engine mr IN [tez, spark] IS false"]},"Reduce Operator Tree:":{"Group By Operator":{"aggregations:":["count(VALUE._col0)"],"Group By Vectorization:":{"vectorOutput:":"false","native:":"false","projectedOutputColumns:":"null"},"mode:":"mergepartial","outputColumnNames:":["_col0"],"Statistics:":"Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"GBY_15","children":{"File Output Operator":{"compressed:":"false","Statistics:":"Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE","table:":{"input format:":"org.apache.hadoop.mapred.SequenceFileInp
 utFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat","serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe"},"OperatorId:":"FS_17"}}}}}},"Stage-0":{"Fetch Operator":{"limit:":"-1","Processor Tree:":{"ListSink":{"OperatorId:":"LIST_SINK_33"}}}}}}
+{"PLAN VECTORIZATION":{"enabled":true,"enabledConditionsMet":["hive.vectorized.execution.enabled IS true"]},"STAGE DEPENDENCIES":{"Stage-8":{"ROOT STAGE":"TRUE"},"Stage-3":{"DEPENDENT STAGES":"Stage-8"},"Stage-0":{"DEPENDENT STAGES":"Stage-3"}},"STAGE PLANS":{"Stage-8":{"Map Reduce Local Work":{"Alias -> Map Local Tables:":{"$hdt$_1:cd":{"Fetch Operator":{"limit:":"-1"}},"$hdt$_2:hd":{"Fetch Operator":{"limit:":"-1"}}},"Alias -> Map Local Operator Tree:":{"$hdt$_1:cd":{"TableScan":{"alias:":"cd","Statistics:":"Num rows: 20 Data size: 4400 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"TS_2","children":{"Select Operator":{"expressions:":"cint (type: int)","outputColumnNames:":["_col0"],"Statistics:":"Num rows: 20 Data size: 4400 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"SEL_3","children":{"HashTable Sink Operator":{"keys:":{"0":"_col0 (type: int)","1":"_col0 (type: int)"},"OperatorId:":"HASHTABLESINK_26"}}}}}},"$hdt$_2:hd":{"TableScan":{"alias:":"hd","Statist
 ics:":"Num rows: 20 Data size: 4400 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"TS_4","children":{"Select Operator":{"expressions:":"cstring1 (type: string)","outputColumnNames:":["_col0"],"Statistics:":"Num rows: 20 Data size: 4400 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"SEL_5","children":{"HashTable Sink Operator":{"keys:":{"0":"_col1 (type: string)","1":"_col0 (type: string)"},"OperatorId:":"HASHTABLESINK_24"}}}}}}}}},"Stage-3":{"Map Reduce":{"Map Operator Tree:":[{"TableScan":{"alias:":"c","Statistics:":"Num rows: 20 Data size: 4400 Basic stats: COMPLETE Column stats: NONE","TableScan Vectorization:":{"native:":"true","projectedOutputColumns:":"[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]"},"OperatorId:":"TS_0","children":{"Select Operator":{"expressions:":"cint (type: int), cstring1 (type: string)","outputColumnNames:":["_col0","_col1"],"Select Vectorization:":{"className:":"VectorSelectOperator","native:":"true","projectedOutputColumns:":"[2, 6]"},"Stat
 istics:":"Num rows: 20 Data size: 4400 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"SEL_1","children":{"Map Join Operator":{"condition map:":[{"":"Left Outer Join0 to 1"}],"keys:":{"0":"_col0 (type: int)","1":"_col0 (type: int)"},"Map Join Vectorization:":{"className:":"VectorMapJoinOperator","native:":"false","nativeConditionsMet:":["hive.mapjoin.optimized.hashtable IS true","hive.vectorized.execution.mapjoin.native.enabled IS true","One MapJoin Condition IS true","No nullsafe IS true","Small table vectorizes IS true","Optimized Table and Supports Key Types IS true"],"nativeConditionsNotMet:":["hive.execution.engine mr IN [tez, spark] IS false"]},"outputColumnNames:":["_col1"],"Statistics:":"Num rows: 22 Data size: 4840 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"MAPJOIN_23","children":{"Map Join Operator":{"condition map:":[{"":"Left Outer Join0 to 1"}],"keys:":{"0":"_col1 (type: string)","1":"_col0 (type: string)"},"Map Join Vectorization:":{"className:":
 "VectorMapJoinOperator","native:":"false","nativeConditionsMet:":["hive.mapjoin.optimized.hashtable IS true","hive.vectorized.execution.mapjoin.native.enabled IS true","One MapJoin Condition IS true","No nullsafe IS true","Small table vectorizes IS true","Optimized Table and Supports Key Types IS true"],"nativeConditionsNotMet:":["hive.execution.engine mr IN [tez, spark] IS false"]},"Statistics:":"Num rows: 24 Data size: 5324 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"MAPJOIN_22","children":{"Group By Operator":{"aggregations:":["count()"],"Group By Vectorization:":{"aggregators:":["VectorUDAFCountStar(*) -> bigint"],"className:":"VectorGroupByOperator","vectorOutput:":"true","native:":"false","projectedOutputColumns:":"[0]"},"mode:":"hash","outputColumnNames:":["_col0"],"Statistics:":"Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"GBY_13","children":{"Reduce Output Operator":{"sort order:":"","Reduce Sink Vectorization:":{"className:
 ":"VectorReduceSinkOperator","native:":"false","nativeConditionsMet:":["hive.vectorized.execution.reducesink.new.enabled IS true","No PTF TopN IS true","No DISTINCT columns IS true","BinarySortableSerDe for keys IS true","LazyBinarySerDe for values IS true"],"nativeConditionsNotMet:":["hive.execution.engine mr IN [tez, spark] IS false"]},"Statistics:":"Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE","value expressions:":"_col0 (type: bigint)","OperatorId:":"RS_14","OutputOperators:":"[GBY_15]"}}}}}}}}}}}}],"Execution mode:":"vectorized","Map Vectorization:":{"enabled:":"true","enabledConditionsMet:":["hive.vectorized.use.vectorized.input.format IS true"],"groupByVectorOutput:":"true","inputFileFormats:":["org.apache.hadoop.hive.ql.io.orc.OrcInputFormat"],"allNative:":"false","usesVectorUDFAdaptor:":"false","vectorized:":"true","rowBatchContext:":{"dataColumnCount:":"12","includeColumns:":"[2, 6]","dataColumns:":["ctinyint:tinyint","csmallint:smallint","cint:int","
 cbigint:bigint","cfloat:float","cdouble:double","cstring1:string","cstring2:string","ctimestamp1:timestamp","ctimestamp2:timestamp","cboolean1:boolean","cboolean2:boolean"],"partitionColumnCount:":"0"}},"Local Work:":{"Map Reduce Local Work":{}},"Reduce Vectorization:":{"enabled:":"false","enableConditionsMet:":["hive.vectorized.execution.reduce.enabled IS true"],"enableConditionsNotMet:":["hive.execution.engine mr IN [tez, spark] IS false"]},"Reduce Operator Tree:":{"Group By Operator":{"aggregations:":["count(VALUE._col0)"],"Group By Vectorization:":{"vectorOutput:":"false","native:":"false","projectedOutputColumns:":"null"},"mode:":"mergepartial","outputColumnNames:":["_col0"],"Statistics:":"Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"GBY_15","children":{"File Output Operator":{"compressed:":"false","Statistics:":"Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE","table:":{"input format:":"org.apache.hadoop.mapred.SequenceFil
 eInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat","serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe"},"OperatorId:":"FS_17"}}}}}},"Stage-0":{"Fetch Operator":{"limit:":"-1","Processor Tree:":{"ListSink":{"OperatorId:":"LIST_SINK_33"}}}}}}
 PREHOOK: query: select count(*) from (select c.cstring1
 from small_alltypesorc_a c
 left outer join small_alltypesorc_a cd
@@ -282,7 +282,7 @@ left outer join small_alltypesorc_a hd
   on hd.cstring1 = c.cstring1
 ) t1
 POSTHOOK: type: QUERY
-{"PLAN VECTORIZATION":{"enabled":true,"enabledConditionsMet":["hive.vectorized.execution.enabled IS true"]},"STAGE DEPENDENCIES":{"Stage-8":{"ROOT STAGE":"TRUE"},"Stage-3":{"DEPENDENT STAGES":"Stage-8"},"Stage-0":{"DEPENDENT STAGES":"Stage-3"}},"STAGE PLANS":{"Stage-8":{"Map Reduce Local Work":{"Alias -> Map Local Tables:":{"$hdt$_1:cd":{"Fetch Operator":{"limit:":"-1"}},"$hdt$_2:hd":{"Fetch Operator":{"limit:":"-1"}}},"Alias -> Map Local Operator Tree:":{"$hdt$_1:cd":{"TableScan":{"alias:":"cd","Statistics:":"Num rows: 20 Data size: 4400 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"TS_2","children":{"Select Operator":{"expressions:":"cstring2 (type: string)","outputColumnNames:":["_col0"],"Statistics:":"Num rows: 20 Data size: 4400 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"SEL_3","children":{"HashTable Sink Operator":{"keys:":{"0":"_col1 (type: string)","1":"_col0 (type: string)"},"OperatorId:":"HASHTABLESINK_26"}}}}}},"$hdt$_2:hd":{"TableScan":{"alias:":
 "hd","Statistics:":"Num rows: 20 Data size: 4400 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"TS_4","children":{"Select Operator":{"expressions:":"cstring1 (type: string)","outputColumnNames:":["_col0"],"Statistics:":"Num rows: 20 Data size: 4400 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"SEL_5","children":{"HashTable Sink Operator":{"keys:":{"0":"_col0 (type: string)","1":"_col0 (type: string)"},"OperatorId:":"HASHTABLESINK_24"}}}}}}}}},"Stage-3":{"Map Reduce":{"Map Operator Tree:":[{"TableScan":{"alias:":"c","Statistics:":"Num rows: 20 Data size: 4400 Basic stats: COMPLETE Column stats: NONE","TableScan Vectorization:":{"native:":"true","projectedOutputColumns:":"[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]"},"OperatorId:":"TS_0","children":{"Select Operator":{"expressions:":"cstring1 (type: string), cstring2 (type: string)","outputColumnNames:":["_col0","_col1"],"Select Vectorization:":{"className:":"VectorSelectOperator","native:":"true","projectedOutputColum
 ns:":"[6, 7]"},"Statistics:":"Num rows: 20 Data size: 4400 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"SEL_1","children":{"Map Join Operator":{"condition map:":[{"":"Left Outer Join0 to 1"}],"keys:":{"0":"_col1 (type: string)","1":"_col0 (type: string)"},"Map Join Vectorization:":{"className:":"VectorMapJoinOperator","native:":"false","nativeConditionsMet:":["hive.mapjoin.optimized.hashtable IS true","hive.vectorized.execution.mapjoin.native.enabled IS true","One MapJoin Condition IS true","No nullsafe IS true","Small table vectorizes IS true","Optimized Table and Supports Key Types IS true"],"nativeConditionsNotMet:":["hive.execution.engine mr IN [tez, spark] IS false"]},"outputColumnNames:":["_col0"],"Statistics:":"Num rows: 22 Data size: 4840 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"MAPJOIN_23","children":{"Map Join Operator":{"condition map:":[{"":"Left Outer Join0 to 1"}],"keys:":{"0":"_col0 (type: string)","1":"_col0 (type: string)"},"Map Join Vect
 orization:":{"className:":"VectorMapJoinOperator","native:":"false","nativeConditionsMet:":["hive.mapjoin.optimized.hashtable IS true","hive.vectorized.execution.mapjoin.native.enabled IS true","One MapJoin Condition IS true","No nullsafe IS true","Small table vectorizes IS true","Optimized Table and Supports Key Types IS true"],"nativeConditionsNotMet:":["hive.execution.engine mr IN [tez, spark] IS false"]},"Statistics:":"Num rows: 24 Data size: 5324 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"MAPJOIN_22","children":{"Group By Operator":{"aggregations:":["count()"],"Group By Vectorization:":{"aggregators:":["VectorUDAFCountStar(*) -> bigint"],"className:":"VectorGroupByOperator","vectorOutput:":"true","native:":"false","projectedOutputColumns:":"[0]"},"mode:":"hash","outputColumnNames:":["_col0"],"Statistics:":"Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"GBY_13","children":{"Reduce Output Operator":{"sort order:":"","Reduce Sink Ve
 ctorization:":{"className:":"VectorReduceSinkOperator","native:":"false","nativeConditionsMet:":["hive.vectorized.execution.reducesink.new.enabled IS true","No TopN IS true","No DISTINCT columns IS true","BinarySortableSerDe for keys IS true","LazyBinarySerDe for values IS true"],"nativeConditionsNotMet:":["hive.execution.engine mr IN [tez, spark] IS false"]},"Statistics:":"Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE","value expressions:":"_col0 (type: bigint)","OperatorId:":"RS_14","OutputOperators:":"[GBY_15]"}}}}}}}}}}}}],"Execution mode:":"vectorized","Map Vectorization:":{"enabled:":"true","enabledConditionsMet:":["hive.vectorized.use.vectorized.input.format IS true"],"groupByVectorOutput:":"true","inputFileFormats:":["org.apache.hadoop.hive.ql.io.orc.OrcInputFormat"],"allNative:":"false","usesVectorUDFAdaptor:":"false","vectorized:":"true","rowBatchContext:":{"dataColumnCount:":"12","includeColumns:":"[6, 7]","dataColumns:":["ctinyint:tinyint","csmallint:
 smallint","cint:int","cbigint:bigint","cfloat:float","cdouble:double","cstring1:string","cstring2:string","ctimestamp1:timestamp","ctimestamp2:timestamp","cboolean1:boolean","cboolean2:boolean"],"partitionColumnCount:":"0"}},"Local Work:":{"Map Reduce Local Work":{}},"Reduce Vectorization:":{"enabled:":"false","enableConditionsMet:":["hive.vectorized.execution.reduce.enabled IS true"],"enableConditionsNotMet:":["hive.execution.engine mr IN [tez, spark] IS false"]},"Reduce Operator Tree:":{"Group By Operator":{"aggregations:":["count(VALUE._col0)"],"Group By Vectorization:":{"vectorOutput:":"false","native:":"false","projectedOutputColumns:":"null"},"mode:":"mergepartial","outputColumnNames:":["_col0"],"Statistics:":"Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"GBY_15","children":{"File Output Operator":{"compressed:":"false","Statistics:":"Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE","table:":{"input format:":"org.apache.had
 oop.mapred.SequenceFileInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat","serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe"},"OperatorId:":"FS_17"}}}}}},"Stage-0":{"Fetch Operator":{"limit:":"-1","Processor Tree:":{"ListSink":{"OperatorId:":"LIST_SINK_33"}}}}}}
+{"PLAN VECTORIZATION":{"enabled":true,"enabledConditionsMet":["hive.vectorized.execution.enabled IS true"]},"STAGE DEPENDENCIES":{"Stage-8":{"ROOT STAGE":"TRUE"},"Stage-3":{"DEPENDENT STAGES":"Stage-8"},"Stage-0":{"DEPENDENT STAGES":"Stage-3"}},"STAGE PLANS":{"Stage-8":{"Map Reduce Local Work":{"Alias -> Map Local Tables:":{"$hdt$_1:cd":{"Fetch Operator":{"limit:":"-1"}},"$hdt$_2:hd":{"Fetch Operator":{"limit:":"-1"}}},"Alias -> Map Local Operator Tree:":{"$hdt$_1:cd":{"TableScan":{"alias:":"cd","Statistics:":"Num rows: 20 Data size: 4400 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"TS_2","children":{"Select Operator":{"expressions:":"cstring2 (type: string)","outputColumnNames:":["_col0"],"Statistics:":"Num rows: 20 Data size: 4400 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"SEL_3","children":{"HashTable Sink Operator":{"keys:":{"0":"_col1 (type: string)","1":"_col0 (type: string)"},"OperatorId:":"HASHTABLESINK_26"}}}}}},"$hdt$_2:hd":{"TableScan":{"alias:":
 "hd","Statistics:":"Num rows: 20 Data size: 4400 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"TS_4","children":{"Select Operator":{"expressions:":"cstring1 (type: string)","outputColumnNames:":["_col0"],"Statistics:":"Num rows: 20 Data size: 4400 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"SEL_5","children":{"HashTable Sink Operator":{"keys:":{"0":"_col0 (type: string)","1":"_col0 (type: string)"},"OperatorId:":"HASHTABLESINK_24"}}}}}}}}},"Stage-3":{"Map Reduce":{"Map Operator Tree:":[{"TableScan":{"alias:":"c","Statistics:":"Num rows: 20 Data size: 4400 Basic stats: COMPLETE Column stats: NONE","TableScan Vectorization:":{"native:":"true","projectedOutputColumns:":"[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]"},"OperatorId:":"TS_0","children":{"Select Operator":{"expressions:":"cstring1 (type: string), cstring2 (type: string)","outputColumnNames:":["_col0","_col1"],"Select Vectorization:":{"className:":"VectorSelectOperator","native:":"true","projectedOutputColum
 ns:":"[6, 7]"},"Statistics:":"Num rows: 20 Data size: 4400 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"SEL_1","children":{"Map Join Operator":{"condition map:":[{"":"Left Outer Join0 to 1"}],"keys:":{"0":"_col1 (type: string)","1":"_col0 (type: string)"},"Map Join Vectorization:":{"className:":"VectorMapJoinOperator","native:":"false","nativeConditionsMet:":["hive.mapjoin.optimized.hashtable IS true","hive.vectorized.execution.mapjoin.native.enabled IS true","One MapJoin Condition IS true","No nullsafe IS true","Small table vectorizes IS true","Optimized Table and Supports Key Types IS true"],"nativeConditionsNotMet:":["hive.execution.engine mr IN [tez, spark] IS false"]},"outputColumnNames:":["_col0"],"Statistics:":"Num rows: 22 Data size: 4840 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"MAPJOIN_23","children":{"Map Join Operator":{"condition map:":[{"":"Left Outer Join0 to 1"}],"keys:":{"0":"_col0 (type: string)","1":"_col0 (type: string)"},"Map Join Vect
 orization:":{"className:":"VectorMapJoinOperator","native:":"false","nativeConditionsMet:":["hive.mapjoin.optimized.hashtable IS true","hive.vectorized.execution.mapjoin.native.enabled IS true","One MapJoin Condition IS true","No nullsafe IS true","Small table vectorizes IS true","Optimized Table and Supports Key Types IS true"],"nativeConditionsNotMet:":["hive.execution.engine mr IN [tez, spark] IS false"]},"Statistics:":"Num rows: 24 Data size: 5324 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"MAPJOIN_22","children":{"Group By Operator":{"aggregations:":["count()"],"Group By Vectorization:":{"aggregators:":["VectorUDAFCountStar(*) -> bigint"],"className:":"VectorGroupByOperator","vectorOutput:":"true","native:":"false","projectedOutputColumns:":"[0]"},"mode:":"hash","outputColumnNames:":["_col0"],"Statistics:":"Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"GBY_13","children":{"Reduce Output Operator":{"sort order:":"","Reduce Sink Ve
 ctorization:":{"className:":"VectorReduceSinkOperator","native:":"false","nativeConditionsMet:":["hive.vectorized.execution.reducesink.new.enabled IS true","No PTF TopN IS true","No DISTINCT columns IS true","BinarySortableSerDe for keys IS true","LazyBinarySerDe for values IS true"],"nativeConditionsNotMet:":["hive.execution.engine mr IN [tez, spark] IS false"]},"Statistics:":"Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE","value expressions:":"_col0 (type: bigint)","OperatorId:":"RS_14","OutputOperators:":"[GBY_15]"}}}}}}}}}}}}],"Execution mode:":"vectorized","Map Vectorization:":{"enabled:":"true","enabledConditionsMet:":["hive.vectorized.use.vectorized.input.format IS true"],"groupByVectorOutput:":"true","inputFileFormats:":["org.apache.hadoop.hive.ql.io.orc.OrcInputFormat"],"allNative:":"false","usesVectorUDFAdaptor:":"false","vectorized:":"true","rowBatchContext:":{"dataColumnCount:":"12","includeColumns:":"[6, 7]","dataColumns:":["ctinyint:tinyint","csmall
 int:smallint","cint:int","cbigint:bigint","cfloat:float","cdouble:double","cstring1:string","cstring2:string","ctimestamp1:timestamp","ctimestamp2:timestamp","cboolean1:boolean","cboolean2:boolean"],"partitionColumnCount:":"0"}},"Local Work:":{"Map Reduce Local Work":{}},"Reduce Vectorization:":{"enabled:":"false","enableConditionsMet:":["hive.vectorized.execution.reduce.enabled IS true"],"enableConditionsNotMet:":["hive.execution.engine mr IN [tez, spark] IS false"]},"Reduce Operator Tree:":{"Group By Operator":{"aggregations:":["count(VALUE._col0)"],"Group By Vectorization:":{"vectorOutput:":"false","native:":"false","projectedOutputColumns:":"null"},"mode:":"mergepartial","outputColumnNames:":["_col0"],"Statistics:":"Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"GBY_15","children":{"File Output Operator":{"compressed:":"false","Statistics:":"Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE","table:":{"input format:":"org.apache
 .hadoop.mapred.SequenceFileInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat","serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe"},"OperatorId:":"FS_17"}}}}}},"Stage-0":{"Fetch Operator":{"limit:":"-1","Processor Tree:":{"ListSink":{"OperatorId:":"LIST_SINK_33"}}}}}}
 PREHOOK: query: select count(*) from (select c.cstring1
 from small_alltypesorc_a c
 left outer join small_alltypesorc_a cd
@@ -322,7 +322,7 @@ left outer join small_alltypesorc_a hd
   on hd.cstring1 = c.cstring1 and hd.cint = c.cint
 ) t1
 POSTHOOK: type: QUERY
-{"PLAN VECTORIZATION":{"enabled":true,"enabledConditionsMet":["hive.vectorized.execution.enabled IS true"]},"STAGE DEPENDENCIES":{"Stage-8":{"ROOT STAGE":"TRUE"},"Stage-3":{"DEPENDENT STAGES":"Stage-8"},"Stage-0":{"DEPENDENT STAGES":"Stage-3"}},"STAGE PLANS":{"Stage-8":{"Map Reduce Local Work":{"Alias -> Map Local Tables:":{"$hdt$_1:cd":{"Fetch Operator":{"limit:":"-1"}},"$hdt$_2:hd":{"Fetch Operator":{"limit:":"-1"}}},"Alias -> Map Local Operator Tree:":{"$hdt$_1:cd":{"TableScan":{"alias:":"cd","Statistics:":"Num rows: 20 Data size: 4400 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"TS_2","children":{"Select Operator":{"expressions:":"cbigint (type: bigint), cstring2 (type: string)","outputColumnNames:":["_col0","_col1"],"Statistics:":"Num rows: 20 Data size: 4400 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"SEL_3","children":{"HashTable Sink Operator":{"keys:":{"0":"_col1 (type: bigint), _col3 (type: string)","1":"_col0 (type: bigint), _col1 (type: string)"}
 ,"OperatorId:":"HASHTABLESINK_26"}}}}}},"$hdt$_2:hd":{"TableScan":{"alias:":"hd","Statistics:":"Num rows: 20 Data size: 4400 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"TS_4","children":{"Select Operator":{"expressions:":"cint (type: int), cstring1 (type: string)","outputColumnNames:":["_col0","_col1"],"Statistics:":"Num rows: 20 Data size: 4400 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"SEL_5","children":{"HashTable Sink Operator":{"keys:":{"0":"_col0 (type: int), _col2 (type: string)","1":"_col0 (type: int), _col1 (type: string)"},"OperatorId:":"HASHTABLESINK_24"}}}}}}}}},"Stage-3":{"Map Reduce":{"Map Operator Tree:":[{"TableScan":{"alias:":"c","Statistics:":"Num rows: 20 Data size: 4400 Basic stats: COMPLETE Column stats: NONE","TableScan Vectorization:":{"native:":"true","projectedOutputColumns:":"[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]"},"OperatorId:":"TS_0","children":{"Select Operator":{"expressions:":"cint (type: int), cbigint (type: bigint), cstrin
 g1 (type: string), cstring2 (type: string)","outputColumnNames:":["_col0","_col1","_col2","_col3"],"Select Vectorization:":{"className:":"VectorSelectOperator","native:":"true","projectedOutputColumns:":"[2, 3, 6, 7]"},"Statistics:":"Num rows: 20 Data size: 4400 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"SEL_1","children":{"Map Join Operator":{"condition map:":[{"":"Left Outer Join0 to 1"}],"keys:":{"0":"_col1 (type: bigint), _col3 (type: string)","1":"_col0 (type: bigint), _col1 (type: string)"},"Map Join Vectorization:":{"className:":"VectorMapJoinOperator","native:":"false","nativeConditionsMet:":["hive.mapjoin.optimized.hashtable IS true","hive.vectorized.execution.mapjoin.native.enabled IS true","One MapJoin Condition IS true","No nullsafe IS true","Small table vectorizes IS true","Optimized Table and Supports Key Types IS true"],"nativeConditionsNotMet:":["hive.execution.engine mr IN [tez, spark] IS false"]},"outputColumnNames:":["_col0","_col2"],"Statistics:":"N
 um rows: 22 Data size: 4840 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"MAPJOIN_23","children":{"Map Join Operator":{"condition map:":[{"":"Left Outer Join0 to 1"}],"keys:":{"0":"_col0 (type: int), _col2 (type: string)","1":"_col0 (type: int), _col1 (type: string)"},"Map Join Vectorization:":{"className:":"VectorMapJoinOperator","native:":"false","nativeConditionsMet:":["hive.mapjoin.optimized.hashtable IS true","hive.vectorized.execution.mapjoin.native.enabled IS true","One MapJoin Condition IS true","No nullsafe IS true","Small table vectorizes IS true","Optimized Table and Supports Key Types IS true"],"nativeConditionsNotMet:":["hive.execution.engine mr IN [tez, spark] IS false"]},"Statistics:":"Num rows: 24 Data size: 5324 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"MAPJOIN_22","children":{"Group By Operator":{"aggregations:":["count()"],"Group By Vectorization:":{"aggregators:":["VectorUDAFCountStar(*) -> bigint"],"className:":"VectorGroupByOperator","
 vectorOutput:":"true","native:":"false","projectedOutputColumns:":"[0]"},"mode:":"hash","outputColumnNames:":["_col0"],"Statistics:":"Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"GBY_13","children":{"Reduce Output Operator":{"sort order:":"","Reduce Sink Vectorization:":{"className:":"VectorReduceSinkOperator","native:":"false","nativeConditionsMet:":["hive.vectorized.execution.reducesink.new.enabled IS true","No TopN IS true","No DISTINCT columns IS true","BinarySortableSerDe for keys IS true","LazyBinarySerDe for values IS true"],"nativeConditionsNotMet:":["hive.execution.engine mr IN [tez, spark] IS false"]},"Statistics:":"Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE","value expressions:":"_col0 (type: bigint)","OperatorId:":"RS_14","OutputOperators:":"[GBY_15]"}}}}}}}}}}}}],"Execution mode:":"vectorized","Map Vectorization:":{"enabled:":"true","enabledConditionsMet:":["hive.vectorized.use.vectorized.input.format IS true"]
 ,"groupByVectorOutput:":"true","inputFileFormats:":["org.apache.hadoop.hive.ql.io.orc.OrcInputFormat"],"allNative:":"false","usesVectorUDFAdaptor:":"false","vectorized:":"true","rowBatchContext:":{"dataColumnCount:":"12","includeColumns:":"[2, 3, 6, 7]","dataColumns:":["ctinyint:tinyint","csmallint:smallint","cint:int","cbigint:bigint","cfloat:float","cdouble:double","cstring1:string","cstring2:string","ctimestamp1:timestamp","ctimestamp2:timestamp","cboolean1:boolean","cboolean2:boolean"],"partitionColumnCount:":"0"}},"Local Work:":{"Map Reduce Local Work":{}},"Reduce Vectorization:":{"enabled:":"false","enableConditionsMet:":["hive.vectorized.execution.reduce.enabled IS true"],"enableConditionsNotMet:":["hive.execution.engine mr IN [tez, spark] IS false"]},"Reduce Operator Tree:":{"Group By Operator":{"aggregations:":["count(VALUE._col0)"],"Group By Vectorization:":{"vectorOutput:":"false","native:":"false","projectedOutputColumns:":"null"},"mode:":"mergepartial","outputColumnName
 s:":["_col0"],"Statistics:":"Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"GBY_15","children":{"File Output Operator":{"compressed:":"false","Statistics:":"Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE","table:":{"input format:":"org.apache.hadoop.mapred.SequenceFileInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat","serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe"},"OperatorId:":"FS_17"}}}}}},"Stage-0":{"Fetch Operator":{"limit:":"-1","Processor Tree:":{"ListSink":{"OperatorId:":"LIST_SINK_33"}}}}}}
+{"PLAN VECTORIZATION":{"enabled":true,"enabledConditionsMet":["hive.vectorized.execution.enabled IS true"]},"STAGE DEPENDENCIES":{"Stage-8":{"ROOT STAGE":"TRUE"},"Stage-3":{"DEPENDENT STAGES":"Stage-8"},"Stage-0":{"DEPENDENT STAGES":"Stage-3"}},"STAGE PLANS":{"Stage-8":{"Map Reduce Local Work":{"Alias -> Map Local Tables:":{"$hdt$_1:cd":{"Fetch Operator":{"limit:":"-1"}},"$hdt$_2:hd":{"Fetch Operator":{"limit:":"-1"}}},"Alias -> Map Local Operator Tree:":{"$hdt$_1:cd":{"TableScan":{"alias:":"cd","Statistics:":"Num rows: 20 Data size: 4400 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"TS_2","children":{"Select Operator":{"expressions:":"cbigint (type: bigint), cstring2 (type: string)","outputColumnNames:":["_col0","_col1"],"Statistics:":"Num rows: 20 Data size: 4400 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"SEL_3","children":{"HashTable Sink Operator":{"keys:":{"0":"_col1 (type: bigint), _col3 (type: string)","1":"_col0 (type: bigint), _col1 (type: string)"}
 ,"OperatorId:":"HASHTABLESINK_26"}}}}}},"$hdt$_2:hd":{"TableScan":{"alias:":"hd","Statistics:":"Num rows: 20 Data size: 4400 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"TS_4","children":{"Select Operator":{"expressions:":"cint (type: int), cstring1 (type: string)","outputColumnNames:":["_col0","_col1"],"Statistics:":"Num rows: 20 Data size: 4400 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"SEL_5","children":{"HashTable Sink Operator":{"keys:":{"0":"_col0 (type: int), _col2 (type: string)","1":"_col0 (type: int), _col1 (type: string)"},"OperatorId:":"HASHTABLESINK_24"}}}}}}}}},"Stage-3":{"Map Reduce":{"Map Operator Tree:":[{"TableScan":{"alias:":"c","Statistics:":"Num rows: 20 Data size: 4400 Basic stats: COMPLETE Column stats: NONE","TableScan Vectorization:":{"native:":"true","projectedOutputColumns:":"[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]"},"OperatorId:":"TS_0","children":{"Select Operator":{"expressions:":"cint (type: int), cbigint (type: bigint), cstrin
 g1 (type: string), cstring2 (type: string)","outputColumnNames:":["_col0","_col1","_col2","_col3"],"Select Vectorization:":{"className:":"VectorSelectOperator","native:":"true","projectedOutputColumns:":"[2, 3, 6, 7]"},"Statistics:":"Num rows: 20 Data size: 4400 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"SEL_1","children":{"Map Join Operator":{"condition map:":[{"":"Left Outer Join0 to 1"}],"keys:":{"0":"_col1 (type: bigint), _col3 (type: string)","1":"_col0 (type: bigint), _col1 (type: string)"},"Map Join Vectorization:":{"className:":"VectorMapJoinOperator","native:":"false","nativeConditionsMet:":["hive.mapjoin.optimized.hashtable IS true","hive.vectorized.execution.mapjoin.native.enabled IS true","One MapJoin Condition IS true","No nullsafe IS true","Small table vectorizes IS true","Optimized Table and Supports Key Types IS true"],"nativeConditionsNotMet:":["hive.execution.engine mr IN [tez, spark] IS false"]},"outputColumnNames:":["_col0","_col2"],"Statistics:":"N
 um rows: 22 Data size: 4840 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"MAPJOIN_23","children":{"Map Join Operator":{"condition map:":[{"":"Left Outer Join0 to 1"}],"keys:":{"0":"_col0 (type: int), _col2 (type: string)","1":"_col0 (type: int), _col1 (type: string)"},"Map Join Vectorization:":{"className:":"VectorMapJoinOperator","native:":"false","nativeConditionsMet:":["hive.mapjoin.optimized.hashtable IS true","hive.vectorized.execution.mapjoin.native.enabled IS true","One MapJoin Condition IS true","No nullsafe IS true","Small table vectorizes IS true","Optimized Table and Supports Key Types IS true"],"nativeConditionsNotMet:":["hive.execution.engine mr IN [tez, spark] IS false"]},"Statistics:":"Num rows: 24 Data size: 5324 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"MAPJOIN_22","children":{"Group By Operator":{"aggregations:":["count()"],"Group By Vectorization:":{"aggregators:":["VectorUDAFCountStar(*) -> bigint"],"className:":"VectorGroupByOperator","
 vectorOutput:":"true","native:":"false","projectedOutputColumns:":"[0]"},"mode:":"hash","outputColumnNames:":["_col0"],"Statistics:":"Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"GBY_13","children":{"Reduce Output Operator":{"sort order:":"","Reduce Sink Vectorization:":{"className:":"VectorReduceSinkOperator","native:":"false","nativeConditionsMet:":["hive.vectorized.execution.reducesink.new.enabled IS true","No PTF TopN IS true","No DISTINCT columns IS true","BinarySortableSerDe for keys IS true","LazyBinarySerDe for values IS true"],"nativeConditionsNotMet:":["hive.execution.engine mr IN [tez, spark] IS false"]},"Statistics:":"Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE","value expressions:":"_col0 (type: bigint)","OperatorId:":"RS_14","OutputOperators:":"[GBY_15]"}}}}}}}}}}}}],"Execution mode:":"vectorized","Map Vectorization:":{"enabled:":"true","enabledConditionsMet:":["hive.vectorized.use.vectorized.input.format IS tr
 ue"],"groupByVectorOutput:":"true","inputFileFormats:":["org.apache.hadoop.hive.ql.io.orc.OrcInputFormat"],"allNative:":"false","usesVectorUDFAdaptor:":"false","vectorized:":"true","rowBatchContext:":{"dataColumnCount:":"12","includeColumns:":"[2, 3, 6, 7]","dataColumns:":["ctinyint:tinyint","csmallint:smallint","cint:int","cbigint:bigint","cfloat:float","cdouble:double","cstring1:string","cstring2:string","ctimestamp1:timestamp","ctimestamp2:timestamp","cboolean1:boolean","cboolean2:boolean"],"partitionColumnCount:":"0"}},"Local Work:":{"Map Reduce Local Work":{}},"Reduce Vectorization:":{"enabled:":"false","enableConditionsMet:":["hive.vectorized.execution.reduce.enabled IS true"],"enableConditionsNotMet:":["hive.execution.engine mr IN [tez, spark] IS false"]},"Reduce Operator Tree:":{"Group By Operator":{"aggregations:":["count(VALUE._col0)"],"Group By Vectorization:":{"vectorOutput:":"false","native:":"false","projectedOutputColumns:":"null"},"mode:":"mergepartial","outputColumn
 Names:":["_col0"],"Statistics:":"Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"GBY_15","children":{"File Output Operator":{"compressed:":"false","Statistics:":"Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE","table:":{"input format:":"org.apache.hadoop.mapred.SequenceFileInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat","serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe"},"OperatorId:":"FS_17"}}}}}},"Stage-0":{"Fetch Operator":{"limit:":"-1","Processor Tree:":{"ListSink":{"OperatorId:":"LIST_SINK_33"}}}}}}
 PREHOOK: query: select count(*) from (select c.cstring1
 from small_alltypesorc_a c
 left outer join small_alltypesorc_a cd

http://git-wip-us.apache.org/repos/asf/hive/blob/ec8c390e/ql/src/test/results/clientpositive/vector_outer_join4.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_outer_join4.q.out b/ql/src/test/results/clientpositive/vector_outer_join4.q.out
index 6ea8237..226898a 100644
--- a/ql/src/test/results/clientpositive/vector_outer_join4.q.out
+++ b/ql/src/test/results/clientpositive/vector_outer_join4.q.out
@@ -780,7 +780,7 @@ left outer join small_alltypesorc_b hd
   on hd.ctinyint = c.ctinyint
 ) t1
 POSTHOOK: type: QUERY
-{"PLAN VECTORIZATION":{"enabled":true,"enabledConditionsMet":["hive.vectorized.execution.enabled IS true"]},"STAGE DEPENDENCIES":{"Stage-8":{"ROOT STAGE":"TRUE"},"Stage-3":{"DEPENDENT STAGES":"Stage-8"},"Stage-0":{"DEPENDENT STAGES":"Stage-3"}},"STAGE PLANS":{"Stage-8":{"Map Reduce Local Work":{"Alias -> Map Local Tables:":{"$hdt$_1:cd":{"Fetch Operator":{"limit:":"-1"}},"$hdt$_2:hd":{"Fetch Operator":{"limit:":"-1"}}},"Alias -> Map Local Operator Tree:":{"$hdt$_1:cd":{"TableScan":{"alias:":"cd","Statistics:":"Num rows: 30 Data size: 6680 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"TS_2","children":{"Select Operator":{"expressions:":"cint (type: int)","outputColumnNames:":["_col0"],"Statistics:":"Num rows: 30 Data size: 6680 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"SEL_3","children":{"HashTable Sink Operator":{"keys:":{"0":"_col1 (type: int)","1":"_col0 (type: int)"},"OperatorId:":"HASHTABLESINK_26"}}}}}},"$hdt$_2:hd":{"TableScan":{"alias:":"hd","Statist
 ics:":"Num rows: 30 Data size: 6680 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"TS_4","children":{"Select Operator":{"expressions:":"ctinyint (type: tinyint)","outputColumnNames:":["_col0"],"Statistics:":"Num rows: 30 Data size: 6680 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"SEL_5","children":{"HashTable Sink Operator":{"keys:":{"0":"_col0 (type: tinyint)","1":"_col0 (type: tinyint)"},"OperatorId:":"HASHTABLESINK_24"}}}}}}}}},"Stage-3":{"Map Reduce":{"Map Operator Tree:":[{"TableScan":{"alias:":"c","Statistics:":"Num rows: 30 Data size: 6680 Basic stats: COMPLETE Column stats: NONE","TableScan Vectorization:":{"native:":"true","projectedOutputColumns:":"[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]"},"OperatorId:":"TS_0","children":{"Select Operator":{"expressions:":"ctinyint (type: tinyint), cint (type: int)","outputColumnNames:":["_col0","_col1"],"Select Vectorization:":{"className:":"VectorSelectOperator","native:":"true","projectedOutputColumns:":"[0, 2]"},"
 Statistics:":"Num rows: 30 Data size: 6680 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"SEL_1","children":{"Map Join Operator":{"condition map:":[{"":"Left Outer Join0 to 1"}],"keys:":{"0":"_col1 (type: int)","1":"_col0 (type: int)"},"Map Join Vectorization:":{"className:":"VectorMapJoinOperator","native:":"false","nativeConditionsMet:":["hive.mapjoin.optimized.hashtable IS true","hive.vectorized.execution.mapjoin.native.enabled IS true","One MapJoin Condition IS true","No nullsafe IS true","Small table vectorizes IS true","Optimized Table and Supports Key Types IS true"],"nativeConditionsNotMet:":["hive.execution.engine mr IN [tez, spark] IS false"]},"outputColumnNames:":["_col0"],"Statistics:":"Num rows: 33 Data size: 7348 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"MAPJOIN_23","children":{"Map Join Operator":{"condition map:":[{"":"Left Outer Join0 to 1"}],"keys:":{"0":"_col0 (type: tinyint)","1":"_col0 (type: tinyint)"},"Map Join Vectorization:":{"classN
 ame:":"VectorMapJoinOperator","native:":"false","nativeConditionsMet:":["hive.mapjoin.optimized.hashtable IS true","hive.vectorized.execution.mapjoin.native.enabled IS true","One MapJoin Condition IS true","No nullsafe IS true","Small table vectorizes IS true","Optimized Table and Supports Key Types IS true"],"nativeConditionsNotMet:":["hive.execution.engine mr IN [tez, spark] IS false"]},"Statistics:":"Num rows: 36 Data size: 8082 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"MAPJOIN_22","children":{"Group By Operator":{"aggregations:":["count()"],"Group By Vectorization:":{"aggregators:":["VectorUDAFCountStar(*) -> bigint"],"className:":"VectorGroupByOperator","vectorOutput:":"true","native:":"false","projectedOutputColumns:":"[0]"},"mode:":"hash","outputColumnNames:":["_col0"],"Statistics:":"Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"GBY_13","children":{"Reduce Output Operator":{"sort order:":"","Reduce Sink Vectorization:":{"clas
 sName:":"VectorReduceSinkOperator","native:":"false","nativeConditionsMet:":["hive.vectorized.execution.reducesink.new.enabled IS true","No TopN IS true","No DISTINCT columns IS true","BinarySortableSerDe for keys IS true","LazyBinarySerDe for values IS true"],"nativeConditionsNotMet:":["hive.execution.engine mr IN [tez, spark] IS false"]},"Statistics:":"Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE","value expressions:":"_col0 (type: bigint)","OperatorId:":"RS_14","OutputOperators:":"[GBY_15]"}}}}}}}}}}}}],"Execution mode:":"vectorized","Map Vectorization:":{"enabled:":"true","enabledConditionsMet:":["hive.vectorized.use.vectorized.input.format IS true"],"groupByVectorOutput:":"true","inputFileFormats:":["org.apache.hadoop.hive.ql.io.orc.OrcInputFormat"],"allNative:":"false","usesVectorUDFAdaptor:":"false","vectorized:":"true","rowBatchContext:":{"dataColumnCount:":"12","includeColumns:":"[0, 2]","dataColumns:":["ctinyint:tinyint","csmallint:smallint","cint:int"
 ,"cbigint:bigint","cfloat:float","cdouble:double","cstring1:string","cstring2:string","ctimestamp1:timestamp","ctimestamp2:timestamp","cboolean1:boolean","cboolean2:boolean"],"partitionColumnCount:":"0"}},"Local Work:":{"Map Reduce Local Work":{}},"Reduce Vectorization:":{"enabled:":"false","enableConditionsMet:":["hive.vectorized.execution.reduce.enabled IS true"],"enableConditionsNotMet:":["hive.execution.engine mr IN [tez, spark] IS false"]},"Reduce Operator Tree:":{"Group By Operator":{"aggregations:":["count(VALUE._col0)"],"Group By Vectorization:":{"vectorOutput:":"false","native:":"false","projectedOutputColumns:":"null"},"mode:":"mergepartial","outputColumnNames:":["_col0"],"Statistics:":"Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"GBY_15","children":{"File Output Operator":{"compressed:":"false","Statistics:":"Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE","table:":{"input format:":"org.apache.hadoop.mapred.SequenceF
 ileInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat","serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe"},"OperatorId:":"FS_17"}}}}}},"Stage-0":{"Fetch Operator":{"limit:":"-1","Processor Tree:":{"ListSink":{"OperatorId:":"LIST_SINK_33"}}}}}}
+{"PLAN VECTORIZATION":{"enabled":true,"enabledConditionsMet":["hive.vectorized.execution.enabled IS true"]},"STAGE DEPENDENCIES":{"Stage-8":{"ROOT STAGE":"TRUE"},"Stage-3":{"DEPENDENT STAGES":"Stage-8"},"Stage-0":{"DEPENDENT STAGES":"Stage-3"}},"STAGE PLANS":{"Stage-8":{"Map Reduce Local Work":{"Alias -> Map Local Tables:":{"$hdt$_1:cd":{"Fetch Operator":{"limit:":"-1"}},"$hdt$_2:hd":{"Fetch Operator":{"limit:":"-1"}}},"Alias -> Map Local Operator Tree:":{"$hdt$_1:cd":{"TableScan":{"alias:":"cd","Statistics:":"Num rows: 30 Data size: 6680 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"TS_2","children":{"Select Operator":{"expressions:":"cint (type: int)","outputColumnNames:":["_col0"],"Statistics:":"Num rows: 30 Data size: 6680 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"SEL_3","children":{"HashTable Sink Operator":{"keys:":{"0":"_col1 (type: int)","1":"_col0 (type: int)"},"OperatorId:":"HASHTABLESINK_26"}}}}}},"$hdt$_2:hd":{"TableScan":{"alias:":"hd","Statist
 ics:":"Num rows: 30 Data size: 6680 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"TS_4","children":{"Select Operator":{"expressions:":"ctinyint (type: tinyint)","outputColumnNames:":["_col0"],"Statistics:":"Num rows: 30 Data size: 6680 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"SEL_5","children":{"HashTable Sink Operator":{"keys:":{"0":"_col0 (type: tinyint)","1":"_col0 (type: tinyint)"},"OperatorId:":"HASHTABLESINK_24"}}}}}}}}},"Stage-3":{"Map Reduce":{"Map Operator Tree:":[{"TableScan":{"alias:":"c","Statistics:":"Num rows: 30 Data size: 6680 Basic stats: COMPLETE Column stats: NONE","TableScan Vectorization:":{"native:":"true","projectedOutputColumns:":"[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]"},"OperatorId:":"TS_0","children":{"Select Operator":{"expressions:":"ctinyint (type: tinyint), cint (type: int)","outputColumnNames:":["_col0","_col1"],"Select Vectorization:":{"className:":"VectorSelectOperator","native:":"true","projectedOutputColumns:":"[0, 2]"},"
 Statistics:":"Num rows: 30 Data size: 6680 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"SEL_1","children":{"Map Join Operator":{"condition map:":[{"":"Left Outer Join0 to 1"}],"keys:":{"0":"_col1 (type: int)","1":"_col0 (type: int)"},"Map Join Vectorization:":{"className:":"VectorMapJoinOperator","native:":"false","nativeConditionsMet:":["hive.mapjoin.optimized.hashtable IS true","hive.vectorized.execution.mapjoin.native.enabled IS true","One MapJoin Condition IS true","No nullsafe IS true","Small table vectorizes IS true","Optimized Table and Supports Key Types IS true"],"nativeConditionsNotMet:":["hive.execution.engine mr IN [tez, spark] IS false"]},"outputColumnNames:":["_col0"],"Statistics:":"Num rows: 33 Data size: 7348 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"MAPJOIN_23","children":{"Map Join Operator":{"condition map:":[{"":"Left Outer Join0 to 1"}],"keys:":{"0":"_col0 (type: tinyint)","1":"_col0 (type: tinyint)"},"Map Join Vectorization:":{"classN
 ame:":"VectorMapJoinOperator","native:":"false","nativeConditionsMet:":["hive.mapjoin.optimized.hashtable IS true","hive.vectorized.execution.mapjoin.native.enabled IS true","One MapJoin Condition IS true","No nullsafe IS true","Small table vectorizes IS true","Optimized Table and Supports Key Types IS true"],"nativeConditionsNotMet:":["hive.execution.engine mr IN [tez, spark] IS false"]},"Statistics:":"Num rows: 36 Data size: 8082 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"MAPJOIN_22","children":{"Group By Operator":{"aggregations:":["count()"],"Group By Vectorization:":{"aggregators:":["VectorUDAFCountStar(*) -> bigint"],"className:":"VectorGroupByOperator","vectorOutput:":"true","native:":"false","projectedOutputColumns:":"[0]"},"mode:":"hash","outputColumnNames:":["_col0"],"Statistics:":"Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"GBY_13","children":{"Reduce Output Operator":{"sort order:":"","Reduce Sink Vectorization:":{"clas
 sName:":"VectorReduceSinkOperator","native:":"false","nativeConditionsMet:":["hive.vectorized.execution.reducesink.new.enabled IS true","No PTF TopN IS true","No DISTINCT columns IS true","BinarySortableSerDe for keys IS true","LazyBinarySerDe for values IS true"],"nativeConditionsNotMet:":["hive.execution.engine mr IN [tez, spark] IS false"]},"Statistics:":"Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE","value expressions:":"_col0 (type: bigint)","OperatorId:":"RS_14","OutputOperators:":"[GBY_15]"}}}}}}}}}}}}],"Execution mode:":"vectorized","Map Vectorization:":{"enabled:":"true","enabledConditionsMet:":["hive.vectorized.use.vectorized.input.format IS true"],"groupByVectorOutput:":"true","inputFileFormats:":["org.apache.hadoop.hive.ql.io.orc.OrcInputFormat"],"allNative:":"false","usesVectorUDFAdaptor:":"false","vectorized:":"true","rowBatchContext:":{"dataColumnCount:":"12","includeColumns:":"[0, 2]","dataColumns:":["ctinyint:tinyint","csmallint:smallint","cint:
 int","cbigint:bigint","cfloat:float","cdouble:double","cstring1:string","cstring2:string","ctimestamp1:timestamp","ctimestamp2:timestamp","cboolean1:boolean","cboolean2:boolean"],"partitionColumnCount:":"0"}},"Local Work:":{"Map Reduce Local Work":{}},"Reduce Vectorization:":{"enabled:":"false","enableConditionsMet:":["hive.vectorized.execution.reduce.enabled IS true"],"enableConditionsNotMet:":["hive.execution.engine mr IN [tez, spark] IS false"]},"Reduce Operator Tree:":{"Group By Operator":{"aggregations:":["count(VALUE._col0)"],"Group By Vectorization:":{"vectorOutput:":"false","native:":"false","projectedOutputColumns:":"null"},"mode:":"mergepartial","outputColumnNames:":["_col0"],"Statistics:":"Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"GBY_15","children":{"File Output Operator":{"compressed:":"false","Statistics:":"Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE","table:":{"input format:":"org.apache.hadoop.mapred.Seque
 nceFileInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat","serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe"},"OperatorId:":"FS_17"}}}}}},"Stage-0":{"Fetch Operator":{"limit:":"-1","Processor Tree:":{"ListSink":{"OperatorId:":"LIST_SINK_33"}}}}}}
 PREHOOK: query: select count(*) from (select c.ctinyint
 from small_alltypesorc_b c
 left outer join small_alltypesorc_b cd

http://git-wip-us.apache.org/repos/asf/hive/blob/ec8c390e/ql/src/test/results/clientpositive/vector_reduce1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_reduce1.q.out b/ql/src/test/results/clientpositive/vector_reduce1.q.out
index 2a985b1..68f836d 100644
--- a/ql/src/test/results/clientpositive/vector_reduce1.q.out
+++ b/ql/src/test/results/clientpositive/vector_reduce1.q.out
@@ -139,7 +139,7 @@ STAGE PLANS:
                 Reduce Sink Vectorization:
                     className: VectorReduceSinkOperator
                     native: false
-                    nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                    nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                     nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false
                 Statistics: Num rows: 2000 Data size: 918712 Basic stats: COMPLETE Column stats: NONE
       Execution mode: vectorized

http://git-wip-us.apache.org/repos/asf/hive/blob/ec8c390e/ql/src/test/results/clientpositive/vector_reduce2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_reduce2.q.out b/ql/src/test/results/clientpositive/vector_reduce2.q.out
index 27ea4ff..0da1f5c 100644
--- a/ql/src/test/results/clientpositive/vector_reduce2.q.out
+++ b/ql/src/test/results/clientpositive/vector_reduce2.q.out
@@ -139,7 +139,7 @@ STAGE PLANS:
                 Reduce Sink Vectorization:
                     className: VectorReduceSinkOperator
                     native: false
-                    nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                    nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                     nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false
                 Statistics: Num rows: 2000 Data size: 918712 Basic stats: COMPLETE Column stats: NONE
       Execution mode: vectorized

http://git-wip-us.apache.org/repos/asf/hive/blob/ec8c390e/ql/src/test/results/clientpositive/vector_reduce3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_reduce3.q.out b/ql/src/test/results/clientpositive/vector_reduce3.q.out
index 2264366..8c20fe8 100644
--- a/ql/src/test/results/clientpositive/vector_reduce3.q.out
+++ b/ql/src/test/results/clientpositive/vector_reduce3.q.out
@@ -139,7 +139,7 @@ STAGE PLANS:
                 Reduce Sink Vectorization:
                     className: VectorReduceSinkOperator
                     native: false
-                    nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                    nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                     nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false
                 Statistics: Num rows: 2000 Data size: 918712 Basic stats: COMPLETE Column stats: NONE
       Execution mode: vectorized

http://git-wip-us.apache.org/repos/asf/hive/blob/ec8c390e/ql/src/test/results/clientpositive/vector_reduce_groupby_decimal.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_reduce_groupby_decimal.q.out b/ql/src/test/results/clientpositive/vector_reduce_groupby_decimal.q.out
index 73aa28b..724ef45 100644
--- a/ql/src/test/results/clientpositive/vector_reduce_groupby_decimal.q.out
+++ b/ql/src/test/results/clientpositive/vector_reduce_groupby_decimal.q.out
@@ -74,8 +74,8 @@ STAGE PLANS:
                   Reduce Sink Vectorization:
                       className: VectorReduceSinkOperator
                       native: false
-                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                      nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false, No TopN IS false
+                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                      nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false
                   Statistics: Num rows: 6102 Data size: 1440072 Basic stats: COMPLETE Column stats: NONE
                   TopN Hash Memory Usage: 0.1
                   value expressions: _col4 (type: decimal(20,10))
@@ -123,8 +123,8 @@ STAGE PLANS:
               Reduce Sink Vectorization:
                   className: VectorReduceSinkOperator
                   native: false
-                  nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                  nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false, No TopN IS false
+                  nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                  nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false
               Statistics: Num rows: 3051 Data size: 720036 Basic stats: COMPLETE Column stats: NONE
               TopN Hash Memory Usage: 0.1
               value expressions: _col4 (type: decimal(20,10))

http://git-wip-us.apache.org/repos/asf/hive/blob/ec8c390e/ql/src/test/results/clientpositive/vector_string_concat.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_string_concat.q.out b/ql/src/test/results/clientpositive/vector_string_concat.q.out
index 3451a7e..00f9b38 100644
--- a/ql/src/test/results/clientpositive/vector_string_concat.q.out
+++ b/ql/src/test/results/clientpositive/vector_string_concat.q.out
@@ -361,8 +361,8 @@ STAGE PLANS:
                   Reduce Sink Vectorization:
                       className: VectorReduceSinkOperator
                       native: false
-                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                      nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false, No TopN IS false
+                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                      nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false
                   Statistics: Num rows: 2000 Data size: 918712 Basic stats: COMPLETE Column stats: NONE
                   TopN Hash Memory Usage: 0.1
       Execution mode: vectorized
@@ -408,8 +408,8 @@ STAGE PLANS:
               Reduce Sink Vectorization:
                   className: VectorReduceSinkOperator
                   native: false
-                  nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                  nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false, No TopN IS false
+                  nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                  nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false
               Statistics: Num rows: 1000 Data size: 459356 Basic stats: COMPLETE Column stats: NONE
               TopN Hash Memory Usage: 0.1
       Execution mode: vectorized

http://git-wip-us.apache.org/repos/asf/hive/blob/ec8c390e/ql/src/test/results/clientpositive/vector_tablesample_rows.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_tablesample_rows.q.out b/ql/src/test/results/clientpositive/vector_tablesample_rows.q.out
index 38f13da..c96ea00 100644
--- a/ql/src/test/results/clientpositive/vector_tablesample_rows.q.out
+++ b/ql/src/test/results/clientpositive/vector_tablesample_rows.q.out
@@ -281,7 +281,7 @@ STAGE PLANS:
               Reduce Sink Vectorization:
                   className: VectorReduceSinkOperator
                   native: false
-                  nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                  nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                   nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false
               Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
               value expressions: _col0 (type: bigint)

http://git-wip-us.apache.org/repos/asf/hive/blob/ec8c390e/ql/src/test/results/clientpositive/vector_varchar_simple.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_varchar_simple.q.out b/ql/src/test/results/clientpositive/vector_varchar_simple.q.out
index 1bed203..0f8bdb5 100644
--- a/ql/src/test/results/clientpositive/vector_varchar_simple.q.out
+++ b/ql/src/test/results/clientpositive/vector_varchar_simple.q.out
@@ -306,8 +306,8 @@ STAGE PLANS:
                   Reduce Sink Vectorization:
                       className: VectorReduceSinkOperator
                       native: false
-                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                      nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false, No TopN IS false
+                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                      nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false
                   Statistics: Num rows: 10 Data size: 2150 Basic stats: COMPLETE Column stats: NONE
                   TopN Hash Memory Usage: 0.1
                   value expressions: _col0 (type: int)

http://git-wip-us.apache.org/repos/asf/hive/blob/ec8c390e/ql/src/test/results/clientpositive/vector_when_case_null.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_when_case_null.q.out b/ql/src/test/results/clientpositive/vector_when_case_null.q.out
index 7476bbb..a7ab4ef 100644
--- a/ql/src/test/results/clientpositive/vector_when_case_null.q.out
+++ b/ql/src/test/results/clientpositive/vector_when_case_null.q.out
@@ -67,7 +67,7 @@ STAGE PLANS:
                   Reduce Sink Vectorization:
                       className: VectorReduceSinkOperator
                       native: false
-                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                       nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false
                   Statistics: Num rows: 5 Data size: 452 Basic stats: COMPLETE Column stats: NONE
                   value expressions: _col1 (type: bigint)

http://git-wip-us.apache.org/repos/asf/hive/blob/ec8c390e/ql/src/test/results/clientpositive/vectorization_13.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vectorization_13.q.out b/ql/src/test/results/clientpositive/vectorization_13.q.out
index 77ca3a2..35c704e 100644
--- a/ql/src/test/results/clientpositive/vectorization_13.q.out
+++ b/ql/src/test/results/clientpositive/vectorization_13.q.out
@@ -166,8 +166,8 @@ STAGE PLANS:
               Reduce Sink Vectorization:
                   className: VectorReduceSinkOperator
                   native: false
-                  nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                  nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false, No TopN IS false
+                  nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                  nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false
               Statistics: Num rows: 1365 Data size: 293479 Basic stats: COMPLETE Column stats: NONE
               TopN Hash Memory Usage: 0.1
       Execution mode: vectorized
@@ -481,8 +481,8 @@ STAGE PLANS:
               Reduce Sink Vectorization:
                   className: VectorReduceSinkOperator
                   native: false
-                  nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                  nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false, No TopN IS false
+                  nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                  nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false
               Statistics: Num rows: 1365 Data size: 293479 Basic stats: COMPLETE Column stats: NONE
               TopN Hash Memory Usage: 0.1
       Execution mode: vectorized

http://git-wip-us.apache.org/repos/asf/hive/blob/ec8c390e/ql/src/test/results/clientpositive/vectorization_7.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vectorization_7.q.out b/ql/src/test/results/clientpositive/vectorization_7.q.out
index e534296..c05fee0 100644
--- a/ql/src/test/results/clientpositive/vectorization_7.q.out
+++ b/ql/src/test/results/clientpositive/vectorization_7.q.out
@@ -92,8 +92,8 @@ STAGE PLANS:
                   Reduce Sink Vectorization:
                       className: VectorReduceSinkOperator
                       native: false
-                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                      nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false, No TopN IS false
+                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                      nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false
                   Statistics: Num rows: 7281 Data size: 1565441 Basic stats: COMPLETE Column stats: NONE
                   TopN Hash Memory Usage: 0.1
       Execution mode: vectorized
@@ -306,8 +306,8 @@ STAGE PLANS:
                   Reduce Sink Vectorization:
                       className: VectorReduceSinkOperator
                       native: false
-                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                      nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false, No TopN IS false
+                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                      nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false
                   Statistics: Num rows: 7281 Data size: 1565441 Basic stats: COMPLETE Column stats: NONE
                   TopN Hash Memory Usage: 0.1
       Execution mode: vectorized

http://git-wip-us.apache.org/repos/asf/hive/blob/ec8c390e/ql/src/test/results/clientpositive/vectorization_8.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vectorization_8.q.out b/ql/src/test/results/clientpositive/vectorization_8.q.out
index 7650c5f..ce2a4b5 100644
--- a/ql/src/test/results/clientpositive/vectorization_8.q.out
+++ b/ql/src/test/results/clientpositive/vectorization_8.q.out
@@ -88,8 +88,8 @@ STAGE PLANS:
                   Reduce Sink Vectorization:
                       className: VectorReduceSinkOperator
                       native: false
-                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                      nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false, No TopN IS false
+                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                      nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false
                   Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
                   TopN Hash Memory Usage: 0.1
       Execution mode: vectorized
@@ -289,8 +289,8 @@ STAGE PLANS:
                   Reduce Sink Vectorization:
                       className: VectorReduceSinkOperator
                       native: false
-                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                      nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false, No TopN IS false
+                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                      nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false
                   Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
                   TopN Hash Memory Usage: 0.1
       Execution mode: vectorized

http://git-wip-us.apache.org/repos/asf/hive/blob/ec8c390e/ql/src/test/results/clientpositive/vectorization_div0.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vectorization_div0.q.out b/ql/src/test/results/clientpositive/vectorization_div0.q.out
index 83dff79..1dd7180 100644
--- a/ql/src/test/results/clientpositive/vectorization_div0.q.out
+++ b/ql/src/test/results/clientpositive/vectorization_div0.q.out
@@ -219,8 +219,8 @@ STAGE PLANS:
                   Reduce Sink Vectorization:
                       className: VectorReduceSinkOperator
                       native: false
-                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                      nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false, No TopN IS false
+                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                      nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false
                   Statistics: Num rows: 1365 Data size: 293479 Basic stats: COMPLETE Column stats: NONE
                   TopN Hash Memory Usage: 0.1
                   value expressions: _col2 (type: decimal(22,21))
@@ -417,8 +417,8 @@ STAGE PLANS:
                   Reduce Sink Vectorization:
                       className: VectorReduceSinkOperator
                       native: false
-                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                      nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false, No TopN IS false
+                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                      nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false
                   Statistics: Num rows: 1365 Data size: 293479 Basic stats: COMPLETE Column stats: NONE
                   TopN Hash Memory Usage: 0.1
                   value expressions: _col2 (type: double), _col4 (type: double), _col5 (type: double)


[27/50] [abbrv] hive git commit: HIVE-16275: Vectorization: Add ReduceSink support for TopN (in specialized native classes) (Matt McCline, reviewed by Gopal Vijayaraghavan)

Posted by we...@apache.org.
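Note (not part of the original commit email): the "No TopN" -> "No PTF TopN" condition renames in the plan diffs above and below reflect HIVE-16275, which lets the specialized native vector ReduceSink operators handle a bounded top-N (the "TopN Hash") instead of falling back to the non-native VectorReduceSinkOperator. As a hedged illustration only — this is NOT Hive's VectorReduceSink code, and the class/method names (TopNSketch, topN) are hypothetical — a minimal, self-contained Java sketch of the bounded top-N idea such a hash implements before rows are shuffled:

import java.util.ArrayList;
import java.util.Comparator;
import java.util.List;
import java.util.PriorityQueue;

// Illustrative sketch: keep only the n "best" rows per operator instance,
// so at most ~n rows are forwarded downstream (ascending ORDER BY ... LIMIT n).
public class TopNSketch {

  // Each row is modeled as {key, value}; a max-heap of size n lets us drop
  // any row whose key is already worse than the current n-th best.
  static List<long[]> topN(Iterable<long[]> rows, int n) {
    PriorityQueue<long[]> heap =
        new PriorityQueue<>(n, Comparator.comparingLong((long[] r) -> r[0]).reversed());
    for (long[] row : rows) {
      if (heap.size() < n) {
        heap.offer(row);
      } else if (row[0] < heap.peek()[0]) {
        heap.poll();       // evict the current worst of the kept rows
        heap.offer(row);   // keep the better row instead
      }
    }
    return new ArrayList<>(heap);
  }

  public static void main(String[] args) {
    List<long[]> rows = new ArrayList<>();
    for (long k = 10; k >= 1; k--) {
      rows.add(new long[] { k, k * 100 });   // {key, value}
    }
    // Only the 3 smallest keys survive; ordering of the survivors is unspecified here.
    for (long[] r : topN(rows, 3)) {
      System.out.println(r[0] + " -> " + r[1]);
    }
  }
}

Usage note: in the plans above, the presence of "TopN Hash Memory Usage: 0.1" together with a native VectorReduceSink*Operator corresponds to this kind of bounded filtering happening inside the native sink, rather than disabling native mode as before.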
http://git-wip-us.apache.org/repos/asf/hive/blob/ec8c390e/ql/src/test/results/clientpositive/llap/vector_mapjoin_reduce.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_mapjoin_reduce.q.out b/ql/src/test/results/clientpositive/llap/vector_mapjoin_reduce.q.out
index ef67ec2..d3586e0 100644
--- a/ql/src/test/results/clientpositive/llap/vector_mapjoin_reduce.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_mapjoin_reduce.q.out
@@ -59,7 +59,7 @@ STAGE PLANS:
                         Reduce Sink Vectorization:
                             className: VectorReduceSinkLongOperator
                             native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 50 Data size: 200 Basic stats: COMPLETE Column stats: COMPLETE
             Execution mode: vectorized, llap
             LLAP IO: no inputs
@@ -101,7 +101,7 @@ STAGE PLANS:
                         Reduce Sink Vectorization:
                             className: VectorReduceSinkLongOperator
                             native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 17 Data size: 272 Basic stats: COMPLETE Column stats: COMPLETE
                         value expressions: _col0 (type: int), _col2 (type: int)
             Execution mode: vectorized, llap
@@ -155,7 +155,7 @@ STAGE PLANS:
                           Reduce Sink Vectorization:
                               className: VectorReduceSinkLongOperator
                               native: true
-                              nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                              nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                           Statistics: Num rows: 4 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE
             Execution mode: vectorized, llap
             LLAP IO: no inputs
@@ -326,7 +326,7 @@ STAGE PLANS:
                         Reduce Sink Vectorization:
                             className: VectorReduceSinkLongOperator
                             native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 50 Data size: 200 Basic stats: COMPLETE Column stats: COMPLETE
             Execution mode: vectorized, llap
             LLAP IO: no inputs
@@ -369,7 +369,7 @@ STAGE PLANS:
                         Reduce Sink Vectorization:
                             className: VectorReduceSinkLongOperator
                             native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 17 Data size: 272 Basic stats: COMPLETE Column stats: COMPLETE
                         value expressions: _col0 (type: int), _col2 (type: int), _col3 (type: int)
             Execution mode: vectorized, llap
@@ -423,7 +423,7 @@ STAGE PLANS:
                           Reduce Sink Vectorization:
                               className: VectorReduceSinkMultiKeyOperator
                               native: true
-                              nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                              nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                           Statistics: Num rows: 2 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE
             Execution mode: vectorized, llap
             LLAP IO: no inputs

http://git-wip-us.apache.org/repos/asf/hive/blob/ec8c390e/ql/src/test/results/clientpositive/llap/vector_non_constant_in_expr.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_non_constant_in_expr.q.out b/ql/src/test/results/clientpositive/llap/vector_non_constant_in_expr.q.out
index 6edc474..10051d1 100644
--- a/ql/src/test/results/clientpositive/llap/vector_non_constant_in_expr.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_non_constant_in_expr.q.out
@@ -21,7 +21,7 @@ STAGE PLANS:
                   alias: alltypesorc
                   Statistics: Num rows: 12288 Data size: 3093170 Basic stats: COMPLETE Column stats: COMPLETE
                   Filter Operator
-                    predicate: (cint) IN (ctinyint, cbigint) (type: boolean)
+                    predicate: (cint) IN (UDFToInteger(ctinyint), UDFToInteger(cbigint)) (type: boolean)
                     Statistics: Num rows: 6144 Data size: 1546640 Basic stats: COMPLETE Column stats: COMPLETE
                     Select Operator
                       expressions: ctinyint (type: tinyint), csmallint (type: smallint), cint (type: int), cbigint (type: bigint), cfloat (type: float), cdouble (type: double), cstring1 (type: string), cstring2 (type: string), ctimestamp1 (type: timestamp), ctimestamp2 (type: timestamp), cboolean1 (type: boolean), cboolean2 (type: boolean)
@@ -40,7 +40,7 @@ STAGE PLANS:
                 enabled: true
                 enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
                 inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                notVectorizedReason: Predicate expression for FILTER operator: Cannot vectorize IN() - casting a column is not supported. Column type is int but the common type is bigint
+                notVectorizedReason: Predicate expression for FILTER operator: Vectorizing IN expression only supported for constant values
                 vectorized: false
 
   Stage: Stage-0

http://git-wip-us.apache.org/repos/asf/hive/blob/ec8c390e/ql/src/test/results/clientpositive/llap/vector_non_string_partition.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_non_string_partition.q.out b/ql/src/test/results/clientpositive/llap/vector_non_string_partition.q.out
index d835f5c..de62ca2 100644
--- a/ql/src/test/results/clientpositive/llap/vector_non_string_partition.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_non_string_partition.q.out
@@ -74,10 +74,9 @@ STAGE PLANS:
                         key expressions: _col0 (type: int)
                         sort order: +
                         Reduce Sink Vectorization:
-                            className: VectorReduceSinkOperator
-                            native: false
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                            nativeConditionsNotMet: No TopN IS false
+                            className: VectorReduceSinkObjectHashOperator
+                            native: true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 1024 Data size: 4096 Basic stats: COMPLETE Column stats: PARTIAL
                         TopN Hash Memory Usage: 0.1
                         value expressions: _col1 (type: tinyint)
@@ -88,7 +87,7 @@ STAGE PLANS:
                 enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
                 groupByVectorOutput: true
                 inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
+                allNative: true
                 usesVectorUDFAdaptor: false
                 vectorized: true
         Reducer 2 
@@ -199,10 +198,9 @@ STAGE PLANS:
                         key expressions: _col0 (type: int), _col1 (type: string)
                         sort order: ++
                         Reduce Sink Vectorization:
-                            className: VectorReduceSinkOperator
-                            native: false
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                            nativeConditionsNotMet: No TopN IS false
+                            className: VectorReduceSinkObjectHashOperator
+                            native: true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 1024 Data size: 121205 Basic stats: COMPLETE Column stats: NONE
                         TopN Hash Memory Usage: 0.1
             Execution mode: vectorized, llap
@@ -212,7 +210,7 @@ STAGE PLANS:
                 enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
                 groupByVectorOutput: true
                 inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
+                allNative: true
                 usesVectorUDFAdaptor: false
                 vectorized: true
         Reducer 2 

http://git-wip-us.apache.org/repos/asf/hive/blob/ec8c390e/ql/src/test/results/clientpositive/llap/vector_nullsafe_join.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_nullsafe_join.q.out b/ql/src/test/results/clientpositive/llap/vector_nullsafe_join.q.out
index 6552d2b..6225986 100644
--- a/ql/src/test/results/clientpositive/llap/vector_nullsafe_join.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_nullsafe_join.q.out
@@ -113,7 +113,7 @@ STAGE PLANS:
                     Reduce Sink Vectorization:
                         className: VectorReduceSinkLongOperator
                         native: true
-                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                     Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
                     value expressions: key (type: int)
             Execution mode: vectorized, llap
@@ -255,7 +255,7 @@ STAGE PLANS:
                       Reduce Sink Vectorization:
                           className: VectorReduceSinkLongOperator
                           native: true
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                       Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
                       value expressions: key (type: int)
             Execution mode: vectorized, llap
@@ -290,7 +290,7 @@ STAGE PLANS:
                       Reduce Sink Vectorization:
                           className: VectorReduceSinkLongOperator
                           native: true
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                       Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
                       value expressions: value (type: int)
             Execution mode: vectorized, llap
@@ -410,7 +410,7 @@ STAGE PLANS:
                     Reduce Sink Vectorization:
                         className: VectorReduceSinkLongOperator
                         native: true
-                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                     Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
                     value expressions: key (type: int)
             Execution mode: vectorized, llap
@@ -438,7 +438,7 @@ STAGE PLANS:
                     Reduce Sink Vectorization:
                         className: VectorReduceSinkLongOperator
                         native: true
-                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                     Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
                     value expressions: value (type: int)
             Execution mode: vectorized, llap
@@ -599,7 +599,7 @@ STAGE PLANS:
                       Reduce Sink Vectorization:
                           className: VectorReduceSinkMultiKeyOperator
                           native: true
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                       Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
             Execution mode: vectorized, llap
             LLAP IO: all inputs
@@ -633,7 +633,7 @@ STAGE PLANS:
                       Reduce Sink Vectorization:
                           className: VectorReduceSinkMultiKeyOperator
                           native: true
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                       Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
             Execution mode: vectorized, llap
             LLAP IO: all inputs
@@ -752,7 +752,7 @@ STAGE PLANS:
                     Reduce Sink Vectorization:
                         className: VectorReduceSinkMultiKeyOperator
                         native: true
-                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                     Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
             Execution mode: vectorized, llap
             LLAP IO: all inputs
@@ -779,7 +779,7 @@ STAGE PLANS:
                     Reduce Sink Vectorization:
                         className: VectorReduceSinkMultiKeyOperator
                         native: true
-                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                     Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
             Execution mode: vectorized, llap
             LLAP IO: all inputs
@@ -996,7 +996,7 @@ STAGE PLANS:
                     Reduce Sink Vectorization:
                         className: VectorReduceSinkLongOperator
                         native: true
-                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                     Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
                     value expressions: key (type: int)
             Execution mode: vectorized, llap
@@ -1138,7 +1138,7 @@ STAGE PLANS:
                       Reduce Sink Vectorization:
                           className: VectorReduceSinkLongOperator
                           native: true
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                       Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
                       value expressions: key (type: int)
             Execution mode: vectorized, llap
@@ -1173,7 +1173,7 @@ STAGE PLANS:
                       Reduce Sink Vectorization:
                           className: VectorReduceSinkLongOperator
                           native: true
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                       Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
                       value expressions: value (type: int)
             Execution mode: vectorized, llap
@@ -1293,7 +1293,7 @@ STAGE PLANS:
                     Reduce Sink Vectorization:
                         className: VectorReduceSinkLongOperator
                         native: true
-                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                     Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
                     value expressions: key (type: int)
             Execution mode: vectorized, llap
@@ -1321,7 +1321,7 @@ STAGE PLANS:
                     Reduce Sink Vectorization:
                         className: VectorReduceSinkLongOperator
                         native: true
-                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                     Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
                     value expressions: value (type: int)
             Execution mode: vectorized, llap
@@ -1482,7 +1482,7 @@ STAGE PLANS:
                       Reduce Sink Vectorization:
                           className: VectorReduceSinkMultiKeyOperator
                           native: true
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                       Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
             Execution mode: vectorized, llap
             LLAP IO: all inputs
@@ -1516,7 +1516,7 @@ STAGE PLANS:
                       Reduce Sink Vectorization:
                           className: VectorReduceSinkMultiKeyOperator
                           native: true
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                       Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
             Execution mode: vectorized, llap
             LLAP IO: all inputs
@@ -1635,7 +1635,7 @@ STAGE PLANS:
                     Reduce Sink Vectorization:
                         className: VectorReduceSinkMultiKeyOperator
                         native: true
-                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                     Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
             Execution mode: vectorized, llap
             LLAP IO: all inputs
@@ -1662,7 +1662,7 @@ STAGE PLANS:
                     Reduce Sink Vectorization:
                         className: VectorReduceSinkMultiKeyOperator
                         native: true
-                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                     Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
             Execution mode: vectorized, llap
             LLAP IO: all inputs

http://git-wip-us.apache.org/repos/asf/hive/blob/ec8c390e/ql/src/test/results/clientpositive/llap/vector_number_compare_projection.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_number_compare_projection.q.out b/ql/src/test/results/clientpositive/llap/vector_number_compare_projection.q.out
index 5c20455..aa1b9d8 100644
--- a/ql/src/test/results/clientpositive/llap/vector_number_compare_projection.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_number_compare_projection.q.out
@@ -144,7 +144,7 @@ STAGE PLANS:
                       Reduce Sink Vectorization:
                           className: VectorReduceSinkObjectHashOperator
                           native: true
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                       Statistics: Num rows: 2001 Data size: 273608 Basic stats: COMPLETE Column stats: NONE
                       value expressions: _col3 (type: boolean), _col4 (type: boolean), _col5 (type: boolean)
             Execution mode: vectorized, llap
@@ -268,7 +268,7 @@ STAGE PLANS:
                       Reduce Sink Vectorization:
                           className: VectorReduceSinkObjectHashOperator
                           native: true
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                       Statistics: Num rows: 2001 Data size: 273608 Basic stats: COMPLETE Column stats: NONE
                       value expressions: _col4 (type: boolean), _col5 (type: boolean), _col6 (type: boolean), _col7 (type: boolean)
             Execution mode: vectorized, llap

http://git-wip-us.apache.org/repos/asf/hive/blob/ec8c390e/ql/src/test/results/clientpositive/llap/vector_order_null.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_order_null.q.out b/ql/src/test/results/clientpositive/llap/vector_order_null.q.out
index 9df6f7a..eb61d02 100644
--- a/ql/src/test/results/clientpositive/llap/vector_order_null.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_order_null.q.out
@@ -105,7 +105,7 @@ STAGE PLANS:
                       Reduce Sink Vectorization:
                           className: VectorReduceSinkObjectHashOperator
                           native: true
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                       Statistics: Num rows: 6 Data size: 22 Basic stats: COMPLETE Column stats: NONE
             Execution mode: vectorized, llap
             LLAP IO: no inputs
@@ -223,7 +223,7 @@ STAGE PLANS:
                       Reduce Sink Vectorization:
                           className: VectorReduceSinkObjectHashOperator
                           native: true
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                       Statistics: Num rows: 6 Data size: 22 Basic stats: COMPLETE Column stats: NONE
             Execution mode: vectorized, llap
             LLAP IO: no inputs
@@ -341,7 +341,7 @@ STAGE PLANS:
                       Reduce Sink Vectorization:
                           className: VectorReduceSinkObjectHashOperator
                           native: true
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                       Statistics: Num rows: 6 Data size: 22 Basic stats: COMPLETE Column stats: NONE
             Execution mode: vectorized, llap
             LLAP IO: no inputs
@@ -459,7 +459,7 @@ STAGE PLANS:
                       Reduce Sink Vectorization:
                           className: VectorReduceSinkObjectHashOperator
                           native: true
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                       Statistics: Num rows: 6 Data size: 22 Basic stats: COMPLETE Column stats: NONE
             Execution mode: vectorized, llap
             LLAP IO: no inputs
@@ -577,7 +577,7 @@ STAGE PLANS:
                       Reduce Sink Vectorization:
                           className: VectorReduceSinkObjectHashOperator
                           native: true
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                       Statistics: Num rows: 6 Data size: 22 Basic stats: COMPLETE Column stats: NONE
             Execution mode: vectorized, llap
             LLAP IO: no inputs
@@ -695,7 +695,7 @@ STAGE PLANS:
                       Reduce Sink Vectorization:
                           className: VectorReduceSinkObjectHashOperator
                           native: true
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                       Statistics: Num rows: 6 Data size: 22 Basic stats: COMPLETE Column stats: NONE
             Execution mode: vectorized, llap
             LLAP IO: no inputs
@@ -813,7 +813,7 @@ STAGE PLANS:
                       Reduce Sink Vectorization:
                           className: VectorReduceSinkObjectHashOperator
                           native: true
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                       Statistics: Num rows: 6 Data size: 22 Basic stats: COMPLETE Column stats: NONE
             Execution mode: vectorized, llap
             LLAP IO: no inputs
@@ -931,7 +931,7 @@ STAGE PLANS:
                       Reduce Sink Vectorization:
                           className: VectorReduceSinkObjectHashOperator
                           native: true
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                       Statistics: Num rows: 6 Data size: 22 Basic stats: COMPLETE Column stats: NONE
             Execution mode: vectorized, llap
             LLAP IO: no inputs
@@ -1049,7 +1049,7 @@ STAGE PLANS:
                       Reduce Sink Vectorization:
                           className: VectorReduceSinkObjectHashOperator
                           native: true
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                       Statistics: Num rows: 6 Data size: 22 Basic stats: COMPLETE Column stats: NONE
             Execution mode: vectorized, llap
             LLAP IO: no inputs
@@ -1167,7 +1167,7 @@ STAGE PLANS:
                       Reduce Sink Vectorization:
                           className: VectorReduceSinkObjectHashOperator
                           native: true
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                       Statistics: Num rows: 6 Data size: 22 Basic stats: COMPLETE Column stats: NONE
             Execution mode: vectorized, llap
             LLAP IO: no inputs
@@ -1285,7 +1285,7 @@ STAGE PLANS:
                       Reduce Sink Vectorization:
                           className: VectorReduceSinkObjectHashOperator
                           native: true
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                       Statistics: Num rows: 6 Data size: 22 Basic stats: COMPLETE Column stats: NONE
             Execution mode: vectorized, llap
             LLAP IO: no inputs

http://git-wip-us.apache.org/repos/asf/hive/blob/ec8c390e/ql/src/test/results/clientpositive/llap/vector_orderby_5.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_orderby_5.q.out b/ql/src/test/results/clientpositive/llap/vector_orderby_5.q.out
index 2a8eda6..7faf892 100644
--- a/ql/src/test/results/clientpositive/llap/vector_orderby_5.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_orderby_5.q.out
@@ -160,7 +160,7 @@ STAGE PLANS:
                         Reduce Sink Vectorization:
                             className: VectorReduceSinkLongOperator
                             native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 2000 Data size: 918712 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col1 (type: bigint)
             Execution mode: vectorized, llap
@@ -202,7 +202,7 @@ STAGE PLANS:
                   Reduce Sink Vectorization:
                       className: VectorReduceSinkObjectHashOperator
                       native: true
-                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                   Statistics: Num rows: 1000 Data size: 459356 Basic stats: COMPLETE Column stats: NONE
                   value expressions: _col1 (type: bigint)
         Reducer 3 

http://git-wip-us.apache.org/repos/asf/hive/blob/ec8c390e/ql/src/test/results/clientpositive/llap/vector_outer_join0.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_outer_join0.q.out b/ql/src/test/results/clientpositive/llap/vector_outer_join0.q.out
index 5c3f0e0..48d4e5c 100644
--- a/ql/src/test/results/clientpositive/llap/vector_outer_join0.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_outer_join0.q.out
@@ -165,7 +165,7 @@ STAGE PLANS:
                       Reduce Sink Vectorization:
                           className: VectorReduceSinkLongOperator
                           native: true
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                       Statistics: Num rows: 6 Data size: 550 Basic stats: COMPLETE Column stats: NONE
                       value expressions: _col1 (type: string)
             Execution mode: vectorized, llap
@@ -251,7 +251,7 @@ STAGE PLANS:
                       Reduce Sink Vectorization:
                           className: VectorReduceSinkLongOperator
                           native: true
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                       Statistics: Num rows: 6 Data size: 544 Basic stats: COMPLETE Column stats: NONE
                       value expressions: _col0 (type: string)
             Execution mode: vectorized, llap

http://git-wip-us.apache.org/repos/asf/hive/blob/ec8c390e/ql/src/test/results/clientpositive/llap/vector_outer_join1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_outer_join1.q.out b/ql/src/test/results/clientpositive/llap/vector_outer_join1.q.out
index c089d00..19c4df4 100644
--- a/ql/src/test/results/clientpositive/llap/vector_outer_join1.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_outer_join1.q.out
@@ -329,7 +329,7 @@ STAGE PLANS:
                       Reduce Sink Vectorization:
                           className: VectorReduceSinkLongOperator
                           native: true
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                       Statistics: Num rows: 15 Data size: 3697 Basic stats: COMPLETE Column stats: COMPLETE
                       value expressions: _col0 (type: tinyint), _col1 (type: smallint), _col3 (type: bigint), _col4 (type: float), _col5 (type: double), _col6 (type: string), _col7 (type: string), _col8 (type: timestamp), _col9 (type: timestamp), _col10 (type: boolean), _col11 (type: boolean)
             Execution mode: vectorized, llap
@@ -497,7 +497,7 @@ STAGE PLANS:
                       Reduce Sink Vectorization:
                           className: VectorReduceSinkLongOperator
                           native: true
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                       Statistics: Num rows: 15 Data size: 44 Basic stats: COMPLETE Column stats: COMPLETE
             Execution mode: vectorized, llap
             LLAP IO: all inputs
@@ -743,7 +743,7 @@ STAGE PLANS:
                             Reduce Sink Vectorization:
                                 className: VectorReduceSinkObjectHashOperator
                                 native: true
-                                nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                                nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                             Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE
                             value expressions: _col0 (type: bigint), _col1 (type: bigint)
             Execution mode: vectorized, llap
@@ -784,7 +784,7 @@ STAGE PLANS:
                       Reduce Sink Vectorization:
                           className: VectorReduceSinkLongOperator
                           native: true
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                       Statistics: Num rows: 15 Data size: 44 Basic stats: COMPLETE Column stats: COMPLETE
             Execution mode: vectorized, llap
             LLAP IO: all inputs
@@ -824,7 +824,7 @@ STAGE PLANS:
                       Reduce Sink Vectorization:
                           className: VectorReduceSinkLongOperator
                           native: true
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                       Statistics: Num rows: 15 Data size: 44 Basic stats: COMPLETE Column stats: COMPLETE
             Execution mode: vectorized, llap
             LLAP IO: all inputs

http://git-wip-us.apache.org/repos/asf/hive/blob/ec8c390e/ql/src/test/results/clientpositive/llap/vector_outer_join2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_outer_join2.q.out b/ql/src/test/results/clientpositive/llap/vector_outer_join2.q.out
index f1319a9..7b821a9 100644
--- a/ql/src/test/results/clientpositive/llap/vector_outer_join2.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_outer_join2.q.out
@@ -329,7 +329,7 @@ STAGE PLANS:
                             Reduce Sink Vectorization:
                                 className: VectorReduceSinkObjectHashOperator
                                 native: true
-                                nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                                nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                             Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE
                             value expressions: _col0 (type: bigint), _col1 (type: bigint)
             Execution mode: vectorized, llap
@@ -370,7 +370,7 @@ STAGE PLANS:
                       Reduce Sink Vectorization:
                           className: VectorReduceSinkLongOperator
                           native: true
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                       Statistics: Num rows: 20 Data size: 44 Basic stats: COMPLETE Column stats: COMPLETE
             Execution mode: vectorized, llap
             LLAP IO: all inputs
@@ -410,7 +410,7 @@ STAGE PLANS:
                       Reduce Sink Vectorization:
                           className: VectorReduceSinkLongOperator
                           native: true
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                       Statistics: Num rows: 20 Data size: 88 Basic stats: COMPLETE Column stats: COMPLETE
             Execution mode: vectorized, llap
             LLAP IO: all inputs

http://git-wip-us.apache.org/repos/asf/hive/blob/ec8c390e/ql/src/test/results/clientpositive/llap/vector_partition_diff_num_cols.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_partition_diff_num_cols.q.out b/ql/src/test/results/clientpositive/llap/vector_partition_diff_num_cols.q.out
index 3e4ae94..ec79647 100644
--- a/ql/src/test/results/clientpositive/llap/vector_partition_diff_num_cols.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_partition_diff_num_cols.q.out
@@ -126,7 +126,7 @@ STAGE PLANS:
                         Reduce Sink Vectorization:
                             className: VectorReduceSinkObjectHashOperator
                             native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col0 (type: bigint)
             Execution mode: vectorized, llap
@@ -295,7 +295,7 @@ STAGE PLANS:
                         Reduce Sink Vectorization:
                             className: VectorReduceSinkObjectHashOperator
                             native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col0 (type: bigint)
             Execution mode: vectorized, llap
@@ -464,7 +464,7 @@ STAGE PLANS:
                         Reduce Sink Vectorization:
                             className: VectorReduceSinkObjectHashOperator
                             native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col0 (type: bigint)
             Execution mode: vectorized, llap
@@ -620,7 +620,7 @@ STAGE PLANS:
                         Reduce Sink Vectorization:
                             className: VectorReduceSinkObjectHashOperator
                             native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col0 (type: bigint)
             Execution mode: vectorized, llap
@@ -776,7 +776,7 @@ STAGE PLANS:
                         Reduce Sink Vectorization:
                             className: VectorReduceSinkObjectHashOperator
                             native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col0 (type: bigint)
             Execution mode: vectorized, llap

http://git-wip-us.apache.org/repos/asf/hive/blob/ec8c390e/ql/src/test/results/clientpositive/llap/vector_partitioned_date_time.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_partitioned_date_time.q.out b/ql/src/test/results/clientpositive/llap/vector_partitioned_date_time.q.out
index 2870d9e..e511bd5 100644
--- a/ql/src/test/results/clientpositive/llap/vector_partitioned_date_time.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_partitioned_date_time.q.out
@@ -287,10 +287,9 @@ STAGE PLANS:
                       key expressions: _col5 (type: int), _col2 (type: date)
                       sort order: ++
                       Reduce Sink Vectorization:
-                          className: VectorReduceSinkOperator
-                          native: false
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                          nativeConditionsNotMet: No TopN IS false
+                          className: VectorReduceSinkObjectHashOperator
+                          native: true
+                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                       Statistics: Num rows: 137 Data size: 39456 Basic stats: COMPLETE Column stats: NONE
                       TopN Hash Memory Usage: 0.1
                       value expressions: _col0 (type: string), _col1 (type: string), _col3 (type: timestamp), _col4 (type: float)
@@ -301,7 +300,7 @@ STAGE PLANS:
                 enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
                 groupByVectorOutput: true
                 inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
+                allNative: true
                 usesVectorUDFAdaptor: false
                 vectorized: true
         Reducer 2 
@@ -310,7 +309,7 @@ STAGE PLANS:
                 enabled: true
                 enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
                 groupByVectorOutput: true
-                allNative: false
+                allNative: true
                 usesVectorUDFAdaptor: false
                 vectorized: true
             Reduce Operator Tree:
@@ -332,10 +331,9 @@ STAGE PLANS:
                     key expressions: _col5 (type: int), _col2 (type: date)
                     sort order: ++
                     Reduce Sink Vectorization:
-                        className: VectorReduceSinkOperator
-                        native: false
-                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                        nativeConditionsNotMet: No TopN IS false
+                        className: VectorReduceSinkObjectHashOperator
+                        native: true
+                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                     Statistics: Num rows: 25 Data size: 7200 Basic stats: COMPLETE Column stats: NONE
                     TopN Hash Memory Usage: 0.1
                     value expressions: _col0 (type: string), _col1 (type: string), _col3 (type: timestamp), _col4 (type: float)
@@ -471,7 +469,7 @@ STAGE PLANS:
                         Reduce Sink Vectorization:
                             className: VectorReduceSinkMultiKeyOperator
                             native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 137 Data size: 39456 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col1 (type: bigint)
             Execution mode: vectorized, llap
@@ -1197,10 +1195,9 @@ STAGE PLANS:
                       key expressions: _col4 (type: int), _col5 (type: date)
                       sort order: ++
                       Reduce Sink Vectorization:
-                          className: VectorReduceSinkOperator
-                          native: false
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                          nativeConditionsNotMet: No TopN IS false
+                          className: VectorReduceSinkObjectHashOperator
+                          native: true
+                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                       Statistics: Num rows: 137 Data size: 39448 Basic stats: COMPLETE Column stats: PARTIAL
                       TopN Hash Memory Usage: 0.1
                       value expressions: _col0 (type: string), _col1 (type: string), _col2 (type: timestamp), _col3 (type: float)
@@ -1211,7 +1208,7 @@ STAGE PLANS:
                 enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
                 groupByVectorOutput: true
                 inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
+                allNative: true
                 usesVectorUDFAdaptor: false
                 vectorized: true
         Reducer 2 
@@ -1220,7 +1217,7 @@ STAGE PLANS:
                 enabled: true
                 enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
                 groupByVectorOutput: true
-                allNative: false
+                allNative: true
                 usesVectorUDFAdaptor: false
                 vectorized: true
             Reduce Operator Tree:
@@ -1242,10 +1239,9 @@ STAGE PLANS:
                     key expressions: _col4 (type: int), _col5 (type: date)
                     sort order: ++
                     Reduce Sink Vectorization:
-                        className: VectorReduceSinkOperator
-                        native: false
-                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                        nativeConditionsNotMet: No TopN IS false
+                        className: VectorReduceSinkObjectHashOperator
+                        native: true
+                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                     Statistics: Num rows: 25 Data size: 1400 Basic stats: COMPLETE Column stats: PARTIAL
                     TopN Hash Memory Usage: 0.1
                     value expressions: _col0 (type: string), _col1 (type: string), _col2 (type: timestamp), _col3 (type: float)
@@ -1405,7 +1401,7 @@ STAGE PLANS:
                         Reduce Sink Vectorization:
                             className: VectorReduceSinkMultiKeyOperator
                             native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 12 Data size: 768 Basic stats: COMPLETE Column stats: COMPLETE
                         value expressions: _col1 (type: bigint)
             Execution mode: vectorized, llap
@@ -2155,10 +2151,9 @@ STAGE PLANS:
                       key expressions: _col4 (type: int), _col5 (type: timestamp)
                       sort order: ++
                       Reduce Sink Vectorization:
-                          className: VectorReduceSinkOperator
-                          native: false
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                          nativeConditionsNotMet: No TopN IS false
+                          className: VectorReduceSinkObjectHashOperator
+                          native: true
+                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                       Statistics: Num rows: 137 Data size: 39448 Basic stats: COMPLETE Column stats: PARTIAL
                       TopN Hash Memory Usage: 0.1
                       value expressions: _col0 (type: string), _col1 (type: string), _col2 (type: date), _col3 (type: float)
@@ -2169,7 +2164,7 @@ STAGE PLANS:
                 enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
                 groupByVectorOutput: true
                 inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
+                allNative: true
                 usesVectorUDFAdaptor: false
                 vectorized: true
         Reducer 2 
@@ -2178,7 +2173,7 @@ STAGE PLANS:
                 enabled: true
                 enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
                 groupByVectorOutput: true
-                allNative: false
+                allNative: true
                 usesVectorUDFAdaptor: false
                 vectorized: true
             Reduce Operator Tree:
@@ -2200,10 +2195,9 @@ STAGE PLANS:
                     key expressions: _col4 (type: int), _col5 (type: timestamp)
                     sort order: ++
                     Reduce Sink Vectorization:
-                        className: VectorReduceSinkOperator
-                        native: false
-                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                        nativeConditionsNotMet: No TopN IS false
+                        className: VectorReduceSinkObjectHashOperator
+                        native: true
+                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                     Statistics: Num rows: 25 Data size: 1000 Basic stats: COMPLETE Column stats: PARTIAL
                     TopN Hash Memory Usage: 0.1
                     value expressions: _col0 (type: string), _col1 (type: string), _col2 (type: date), _col3 (type: float)
@@ -2363,7 +2357,7 @@ STAGE PLANS:
                         Reduce Sink Vectorization:
                             className: VectorReduceSinkMultiKeyOperator
                             native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 12 Data size: 576 Basic stats: COMPLETE Column stats: COMPLETE
                         value expressions: _col1 (type: bigint)
             Execution mode: vectorized, llap
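
Editor's note on the q.out diffs above: they all trace to one change in the logged native-ReduceSink condition, from "No TopN" to "No PTF TopN". Where a reduce sink carries a TopN hint (the "TopN Hash Memory Usage" lines) but is not feeding a PTF, the plan output flips from the row-mode VectorReduceSinkOperator (native: false, with "nativeConditionsNotMet: No TopN IS false") to VectorReduceSinkObjectHashOperator (native: true), and the vertex-level allNative flag becomes true. The sketch below only illustrates that relaxed condition; the class, method, and parameter names are hypothetical and it is not the actual Hive planner code.

    // Illustrative sketch only (hypothetical names; not Hive source code).
    public class NativeReduceSinkCheckSketch {

        // Returns whether a reduce sink would pass the native-vectorization conditions
        // shown in the plans above, under the new "No PTF TopN" rule.
        static boolean isNativeEligible(boolean newReduceSinkEnabled,
                                        boolean engineIsTezOrSpark,
                                        boolean hasTopN,
                                        boolean isPtfReduceSink,
                                        boolean hasDistinctColumns,
                                        boolean binarySortableKeys,
                                        boolean lazyBinaryValues) {
            // Old condition: boolean topNOk = !hasTopN;           // "No TopN"
            boolean topNOk = !(hasTopN && isPtfReduceSink);         // "No PTF TopN"
            return newReduceSinkEnabled && engineIsTezOrSpark && topNOk
                    && !hasDistinctColumns && binarySortableKeys && lazyBinaryValues;
        }

        public static void main(String[] args) {
            // A LIMIT-style TopN sink outside a PTF now passes (prints true); under the
            // old "No TopN" condition the same inputs would have failed the check.
            System.out.println(isNativeEligible(true, true, true, false, false, true, true));
        }
    }
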


[37/50] [abbrv] hive git commit: HIVE-14671 : merge master into hive-14535 (Wei Zheng)

Posted by we...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/1ceaf357/ql/src/test/results/clientpositive/tez/explainuser_3.q.out
----------------------------------------------------------------------
diff --cc ql/src/test/results/clientpositive/tez/explainuser_3.q.out
index e409a36,cbfb33a..42cb5a6
--- a/ql/src/test/results/clientpositive/tez/explainuser_3.q.out
+++ b/ql/src/test/results/clientpositive/tez/explainuser_3.q.out
@@@ -509,11 -509,11 +509,19 @@@ Stage-
                    Conditional Operator
                      Stage-1
                        Map 1 vectorized
++<<<<<<< HEAD
 +                      File Output Operator [FS_8]
 +                        table:{"name:":"default.orc_merge5"}
 +                        Select Operator [SEL_7] (rows=306 width=268)
 +                          Output:["_col0","_col1","_col2","_col3","_col4"]
 +                          Filter Operator [FIL_6] (rows=306 width=268)
++=======
+                       File Output Operator [FS_3]
+                         table:{"name:":"default.orc_merge5"}
+                         Select Operator [SEL_2] (rows=306 width=268)
+                           Output:["_col0","_col1","_col2","_col3","_col4"]
+                           Filter Operator [FIL_4] (rows=306 width=268)
++>>>>>>> master
                              predicate:(userid <= 13)
                              TableScan [TS_0] (rows=919 width=268)
                                default@orc_merge5,orc_merge5,Tbl:COMPLETE,Col:NONE,Output:["userid","string1","subtype","decimal1","ts"]


[46/50] [abbrv] hive git commit: HIVE-14671 : merge master into hive-14535 (Wei Zheng)

Posted by we...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/1ceaf357/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java
----------------------------------------------------------------------
diff --cc metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java
index 6b80461,1915150..cb1bd59
--- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java
+++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java
@@@ -48577,7 -48198,1015 +48726,1015 @@@ public class ThriftHiveMetastore 
  
      @Override
      public String toString() {
-       StringBuilder sb = new StringBuilder("drop_table_with_environment_context_result(");
+       StringBuilder sb = new StringBuilder("drop_table_with_environment_context_result(");
+       boolean first = true;
+ 
+       sb.append("o1:");
+       if (this.o1 == null) {
+         sb.append("null");
+       } else {
+         sb.append(this.o1);
+       }
+       first = false;
+       if (!first) sb.append(", ");
+       sb.append("o3:");
+       if (this.o3 == null) {
+         sb.append("null");
+       } else {
+         sb.append(this.o3);
+       }
+       first = false;
+       sb.append(")");
+       return sb.toString();
+     }
+ 
+     public void validate() throws org.apache.thrift.TException {
+       // check for required fields
+       // check for sub-struct validity
+     }
+ 
+     private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+       try {
+         write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+       } catch (org.apache.thrift.TException te) {
+         throw new java.io.IOException(te);
+       }
+     }
+ 
+     private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+       try {
+         read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+       } catch (org.apache.thrift.TException te) {
+         throw new java.io.IOException(te);
+       }
+     }
+ 
+     private static class drop_table_with_environment_context_resultStandardSchemeFactory implements SchemeFactory {
+       public drop_table_with_environment_context_resultStandardScheme getScheme() {
+         return new drop_table_with_environment_context_resultStandardScheme();
+       }
+     }
+ 
+     private static class drop_table_with_environment_context_resultStandardScheme extends StandardScheme<drop_table_with_environment_context_result> {
+ 
+       public void read(org.apache.thrift.protocol.TProtocol iprot, drop_table_with_environment_context_result struct) throws org.apache.thrift.TException {
+         org.apache.thrift.protocol.TField schemeField;
+         iprot.readStructBegin();
+         while (true)
+         {
+           schemeField = iprot.readFieldBegin();
+           if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
+             break;
+           }
+           switch (schemeField.id) {
+             case 1: // O1
+               if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) {
+                 struct.o1 = new NoSuchObjectException();
+                 struct.o1.read(iprot);
+                 struct.setO1IsSet(true);
+               } else { 
+                 org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+               }
+               break;
+             case 2: // O3
+               if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) {
+                 struct.o3 = new MetaException();
+                 struct.o3.read(iprot);
+                 struct.setO3IsSet(true);
+               } else { 
+                 org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+               }
+               break;
+             default:
+               org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+           }
+           iprot.readFieldEnd();
+         }
+         iprot.readStructEnd();
+         struct.validate();
+       }
+ 
+       public void write(org.apache.thrift.protocol.TProtocol oprot, drop_table_with_environment_context_result struct) throws org.apache.thrift.TException {
+         struct.validate();
+ 
+         oprot.writeStructBegin(STRUCT_DESC);
+         if (struct.o1 != null) {
+           oprot.writeFieldBegin(O1_FIELD_DESC);
+           struct.o1.write(oprot);
+           oprot.writeFieldEnd();
+         }
+         if (struct.o3 != null) {
+           oprot.writeFieldBegin(O3_FIELD_DESC);
+           struct.o3.write(oprot);
+           oprot.writeFieldEnd();
+         }
+         oprot.writeFieldStop();
+         oprot.writeStructEnd();
+       }
+ 
+     }
+ 
+     private static class drop_table_with_environment_context_resultTupleSchemeFactory implements SchemeFactory {
+       public drop_table_with_environment_context_resultTupleScheme getScheme() {
+         return new drop_table_with_environment_context_resultTupleScheme();
+       }
+     }
+ 
+     private static class drop_table_with_environment_context_resultTupleScheme extends TupleScheme<drop_table_with_environment_context_result> {
+ 
+       @Override
+       public void write(org.apache.thrift.protocol.TProtocol prot, drop_table_with_environment_context_result struct) throws org.apache.thrift.TException {
+         TTupleProtocol oprot = (TTupleProtocol) prot;
+         BitSet optionals = new BitSet();
+         if (struct.isSetO1()) {
+           optionals.set(0);
+         }
+         if (struct.isSetO3()) {
+           optionals.set(1);
+         }
+         oprot.writeBitSet(optionals, 2);
+         if (struct.isSetO1()) {
+           struct.o1.write(oprot);
+         }
+         if (struct.isSetO3()) {
+           struct.o3.write(oprot);
+         }
+       }
+ 
+       @Override
+       public void read(org.apache.thrift.protocol.TProtocol prot, drop_table_with_environment_context_result struct) throws org.apache.thrift.TException {
+         TTupleProtocol iprot = (TTupleProtocol) prot;
+         BitSet incoming = iprot.readBitSet(2);
+         if (incoming.get(0)) {
+           struct.o1 = new NoSuchObjectException();
+           struct.o1.read(iprot);
+           struct.setO1IsSet(true);
+         }
+         if (incoming.get(1)) {
+           struct.o3 = new MetaException();
+           struct.o3.read(iprot);
+           struct.setO3IsSet(true);
+         }
+       }
+     }
+ 
+   }
+ 
+   public static class truncate_table_args implements org.apache.thrift.TBase<truncate_table_args, truncate_table_args._Fields>, java.io.Serializable, Cloneable, Comparable<truncate_table_args>   {
+     private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("truncate_table_args");
+ 
+     private static final org.apache.thrift.protocol.TField DB_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("dbName", org.apache.thrift.protocol.TType.STRING, (short)1);
+     private static final org.apache.thrift.protocol.TField TABLE_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tableName", org.apache.thrift.protocol.TType.STRING, (short)2);
+     private static final org.apache.thrift.protocol.TField PART_NAMES_FIELD_DESC = new org.apache.thrift.protocol.TField("partNames", org.apache.thrift.protocol.TType.LIST, (short)3);
+ 
+     private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+     static {
+       schemes.put(StandardScheme.class, new truncate_table_argsStandardSchemeFactory());
+       schemes.put(TupleScheme.class, new truncate_table_argsTupleSchemeFactory());
+     }
+ 
+     private String dbName; // required
+     private String tableName; // required
+     private List<String> partNames; // required
+ 
+     /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+     public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+       DB_NAME((short)1, "dbName"),
+       TABLE_NAME((short)2, "tableName"),
+       PART_NAMES((short)3, "partNames");
+ 
+       private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+ 
+       static {
+         for (_Fields field : EnumSet.allOf(_Fields.class)) {
+           byName.put(field.getFieldName(), field);
+         }
+       }
+ 
+       /**
+        * Find the _Fields constant that matches fieldId, or null if its not found.
+        */
+       public static _Fields findByThriftId(int fieldId) {
+         switch(fieldId) {
+           case 1: // DB_NAME
+             return DB_NAME;
+           case 2: // TABLE_NAME
+             return TABLE_NAME;
+           case 3: // PART_NAMES
+             return PART_NAMES;
+           default:
+             return null;
+         }
+       }
+ 
+       /**
+        * Find the _Fields constant that matches fieldId, throwing an exception
+        * if it is not found.
+        */
+       public static _Fields findByThriftIdOrThrow(int fieldId) {
+         _Fields fields = findByThriftId(fieldId);
+         if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+         return fields;
+       }
+ 
+       /**
+        * Find the _Fields constant that matches name, or null if its not found.
+        */
+       public static _Fields findByName(String name) {
+         return byName.get(name);
+       }
+ 
+       private final short _thriftId;
+       private final String _fieldName;
+ 
+       _Fields(short thriftId, String fieldName) {
+         _thriftId = thriftId;
+         _fieldName = fieldName;
+       }
+ 
+       public short getThriftFieldId() {
+         return _thriftId;
+       }
+ 
+       public String getFieldName() {
+         return _fieldName;
+       }
+     }
+ 
+     // isset id assignments
+     public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+     static {
+       Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+       tmpMap.put(_Fields.DB_NAME, new org.apache.thrift.meta_data.FieldMetaData("dbName", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+           new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+       tmpMap.put(_Fields.TABLE_NAME, new org.apache.thrift.meta_data.FieldMetaData("tableName", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+           new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+       tmpMap.put(_Fields.PART_NAMES, new org.apache.thrift.meta_data.FieldMetaData("partNames", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+           new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, 
+               new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))));
+       metaDataMap = Collections.unmodifiableMap(tmpMap);
+       org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(truncate_table_args.class, metaDataMap);
+     }
+ 
+     public truncate_table_args() {
+     }
+ 
+     public truncate_table_args(
+       String dbName,
+       String tableName,
+       List<String> partNames)
+     {
+       this();
+       this.dbName = dbName;
+       this.tableName = tableName;
+       this.partNames = partNames;
+     }
+ 
+     /**
+      * Performs a deep copy on <i>other</i>.
+      */
+     public truncate_table_args(truncate_table_args other) {
+       if (other.isSetDbName()) {
+         this.dbName = other.dbName;
+       }
+       if (other.isSetTableName()) {
+         this.tableName = other.tableName;
+       }
+       if (other.isSetPartNames()) {
+         List<String> __this__partNames = new ArrayList<String>(other.partNames);
+         this.partNames = __this__partNames;
+       }
+     }
+ 
+     public truncate_table_args deepCopy() {
+       return new truncate_table_args(this);
+     }
+ 
+     @Override
+     public void clear() {
+       this.dbName = null;
+       this.tableName = null;
+       this.partNames = null;
+     }
+ 
+     public String getDbName() {
+       return this.dbName;
+     }
+ 
+     public void setDbName(String dbName) {
+       this.dbName = dbName;
+     }
+ 
+     public void unsetDbName() {
+       this.dbName = null;
+     }
+ 
+     /** Returns true if field dbName is set (has been assigned a value) and false otherwise */
+     public boolean isSetDbName() {
+       return this.dbName != null;
+     }
+ 
+     public void setDbNameIsSet(boolean value) {
+       if (!value) {
+         this.dbName = null;
+       }
+     }
+ 
+     public String getTableName() {
+       return this.tableName;
+     }
+ 
+     public void setTableName(String tableName) {
+       this.tableName = tableName;
+     }
+ 
+     public void unsetTableName() {
+       this.tableName = null;
+     }
+ 
+     /** Returns true if field tableName is set (has been assigned a value) and false otherwise */
+     public boolean isSetTableName() {
+       return this.tableName != null;
+     }
+ 
+     public void setTableNameIsSet(boolean value) {
+       if (!value) {
+         this.tableName = null;
+       }
+     }
+ 
+     public int getPartNamesSize() {
+       return (this.partNames == null) ? 0 : this.partNames.size();
+     }
+ 
+     public java.util.Iterator<String> getPartNamesIterator() {
+       return (this.partNames == null) ? null : this.partNames.iterator();
+     }
+ 
+     public void addToPartNames(String elem) {
+       if (this.partNames == null) {
+         this.partNames = new ArrayList<String>();
+       }
+       this.partNames.add(elem);
+     }
+ 
+     public List<String> getPartNames() {
+       return this.partNames;
+     }
+ 
+     public void setPartNames(List<String> partNames) {
+       this.partNames = partNames;
+     }
+ 
+     public void unsetPartNames() {
+       this.partNames = null;
+     }
+ 
+     /** Returns true if field partNames is set (has been assigned a value) and false otherwise */
+     public boolean isSetPartNames() {
+       return this.partNames != null;
+     }
+ 
+     public void setPartNamesIsSet(boolean value) {
+       if (!value) {
+         this.partNames = null;
+       }
+     }
+ 
+     public void setFieldValue(_Fields field, Object value) {
+       switch (field) {
+       case DB_NAME:
+         if (value == null) {
+           unsetDbName();
+         } else {
+           setDbName((String)value);
+         }
+         break;
+ 
+       case TABLE_NAME:
+         if (value == null) {
+           unsetTableName();
+         } else {
+           setTableName((String)value);
+         }
+         break;
+ 
+       case PART_NAMES:
+         if (value == null) {
+           unsetPartNames();
+         } else {
+           setPartNames((List<String>)value);
+         }
+         break;
+ 
+       }
+     }
+ 
+     public Object getFieldValue(_Fields field) {
+       switch (field) {
+       case DB_NAME:
+         return getDbName();
+ 
+       case TABLE_NAME:
+         return getTableName();
+ 
+       case PART_NAMES:
+         return getPartNames();
+ 
+       }
+       throw new IllegalStateException();
+     }
+ 
+     /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+     public boolean isSet(_Fields field) {
+       if (field == null) {
+         throw new IllegalArgumentException();
+       }
+ 
+       switch (field) {
+       case DB_NAME:
+         return isSetDbName();
+       case TABLE_NAME:
+         return isSetTableName();
+       case PART_NAMES:
+         return isSetPartNames();
+       }
+       throw new IllegalStateException();
+     }
+ 
+     @Override
+     public boolean equals(Object that) {
+       if (that == null)
+         return false;
+       if (that instanceof truncate_table_args)
+         return this.equals((truncate_table_args)that);
+       return false;
+     }
+ 
+     public boolean equals(truncate_table_args that) {
+       if (that == null)
+         return false;
+ 
+       boolean this_present_dbName = true && this.isSetDbName();
+       boolean that_present_dbName = true && that.isSetDbName();
+       if (this_present_dbName || that_present_dbName) {
+         if (!(this_present_dbName && that_present_dbName))
+           return false;
+         if (!this.dbName.equals(that.dbName))
+           return false;
+       }
+ 
+       boolean this_present_tableName = true && this.isSetTableName();
+       boolean that_present_tableName = true && that.isSetTableName();
+       if (this_present_tableName || that_present_tableName) {
+         if (!(this_present_tableName && that_present_tableName))
+           return false;
+         if (!this.tableName.equals(that.tableName))
+           return false;
+       }
+ 
+       boolean this_present_partNames = true && this.isSetPartNames();
+       boolean that_present_partNames = true && that.isSetPartNames();
+       if (this_present_partNames || that_present_partNames) {
+         if (!(this_present_partNames && that_present_partNames))
+           return false;
+         if (!this.partNames.equals(that.partNames))
+           return false;
+       }
+ 
+       return true;
+     }
+ 
+     @Override
+     public int hashCode() {
+       List<Object> list = new ArrayList<Object>();
+ 
+       boolean present_dbName = true && (isSetDbName());
+       list.add(present_dbName);
+       if (present_dbName)
+         list.add(dbName);
+ 
+       boolean present_tableName = true && (isSetTableName());
+       list.add(present_tableName);
+       if (present_tableName)
+         list.add(tableName);
+ 
+       boolean present_partNames = true && (isSetPartNames());
+       list.add(present_partNames);
+       if (present_partNames)
+         list.add(partNames);
+ 
+       return list.hashCode();
+     }
+ 
+     @Override
+     public int compareTo(truncate_table_args other) {
+       if (!getClass().equals(other.getClass())) {
+         return getClass().getName().compareTo(other.getClass().getName());
+       }
+ 
+       int lastComparison = 0;
+ 
+       lastComparison = Boolean.valueOf(isSetDbName()).compareTo(other.isSetDbName());
+       if (lastComparison != 0) {
+         return lastComparison;
+       }
+       if (isSetDbName()) {
+         lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.dbName, other.dbName);
+         if (lastComparison != 0) {
+           return lastComparison;
+         }
+       }
+       lastComparison = Boolean.valueOf(isSetTableName()).compareTo(other.isSetTableName());
+       if (lastComparison != 0) {
+         return lastComparison;
+       }
+       if (isSetTableName()) {
+         lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.tableName, other.tableName);
+         if (lastComparison != 0) {
+           return lastComparison;
+         }
+       }
+       lastComparison = Boolean.valueOf(isSetPartNames()).compareTo(other.isSetPartNames());
+       if (lastComparison != 0) {
+         return lastComparison;
+       }
+       if (isSetPartNames()) {
+         lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.partNames, other.partNames);
+         if (lastComparison != 0) {
+           return lastComparison;
+         }
+       }
+       return 0;
+     }
+ 
+     public _Fields fieldForId(int fieldId) {
+       return _Fields.findByThriftId(fieldId);
+     }
+ 
+     public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+       schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+     }
+ 
+     public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+       schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+     }
+ 
+     @Override
+     public String toString() {
+       StringBuilder sb = new StringBuilder("truncate_table_args(");
+       boolean first = true;
+ 
+       sb.append("dbName:");
+       if (this.dbName == null) {
+         sb.append("null");
+       } else {
+         sb.append(this.dbName);
+       }
+       first = false;
+       if (!first) sb.append(", ");
+       sb.append("tableName:");
+       if (this.tableName == null) {
+         sb.append("null");
+       } else {
+         sb.append(this.tableName);
+       }
+       first = false;
+       if (!first) sb.append(", ");
+       sb.append("partNames:");
+       if (this.partNames == null) {
+         sb.append("null");
+       } else {
+         sb.append(this.partNames);
+       }
+       first = false;
+       sb.append(")");
+       return sb.toString();
+     }
+ 
+     public void validate() throws org.apache.thrift.TException {
+       // check for required fields
+       // check for sub-struct validity
+     }
+ 
+     private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+       try {
+         write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+       } catch (org.apache.thrift.TException te) {
+         throw new java.io.IOException(te);
+       }
+     }
+ 
+     private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+       try {
+         read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+       } catch (org.apache.thrift.TException te) {
+         throw new java.io.IOException(te);
+       }
+     }
+ 
+     private static class truncate_table_argsStandardSchemeFactory implements SchemeFactory {
+       public truncate_table_argsStandardScheme getScheme() {
+         return new truncate_table_argsStandardScheme();
+       }
+     }
+ 
+     private static class truncate_table_argsStandardScheme extends StandardScheme<truncate_table_args> {
+ 
+       public void read(org.apache.thrift.protocol.TProtocol iprot, truncate_table_args struct) throws org.apache.thrift.TException {
+         org.apache.thrift.protocol.TField schemeField;
+         iprot.readStructBegin();
+         while (true)
+         {
+           schemeField = iprot.readFieldBegin();
+           if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
+             break;
+           }
+           switch (schemeField.id) {
+             case 1: // DB_NAME
+               if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+                 struct.dbName = iprot.readString();
+                 struct.setDbNameIsSet(true);
+               } else { 
+                 org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+               }
+               break;
+             case 2: // TABLE_NAME
+               if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+                 struct.tableName = iprot.readString();
+                 struct.setTableNameIsSet(true);
+               } else { 
+                 org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+               }
+               break;
+             case 3: // PART_NAMES
+               if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
+                 {
 -                  org.apache.thrift.protocol.TList _list748 = iprot.readListBegin();
 -                  struct.partNames = new ArrayList<String>(_list748.size);
 -                  String _elem749;
 -                  for (int _i750 = 0; _i750 < _list748.size; ++_i750)
++                  org.apache.thrift.protocol.TList _list756 = iprot.readListBegin();
++                  struct.partNames = new ArrayList<String>(_list756.size);
++                  String _elem757;
++                  for (int _i758 = 0; _i758 < _list756.size; ++_i758)
+                   {
 -                    _elem749 = iprot.readString();
 -                    struct.partNames.add(_elem749);
++                    _elem757 = iprot.readString();
++                    struct.partNames.add(_elem757);
+                   }
+                   iprot.readListEnd();
+                 }
+                 struct.setPartNamesIsSet(true);
+               } else { 
+                 org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+               }
+               break;
+             default:
+               org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+           }
+           iprot.readFieldEnd();
+         }
+         iprot.readStructEnd();
+         struct.validate();
+       }
+ 
+       public void write(org.apache.thrift.protocol.TProtocol oprot, truncate_table_args struct) throws org.apache.thrift.TException {
+         struct.validate();
+ 
+         oprot.writeStructBegin(STRUCT_DESC);
+         if (struct.dbName != null) {
+           oprot.writeFieldBegin(DB_NAME_FIELD_DESC);
+           oprot.writeString(struct.dbName);
+           oprot.writeFieldEnd();
+         }
+         if (struct.tableName != null) {
+           oprot.writeFieldBegin(TABLE_NAME_FIELD_DESC);
+           oprot.writeString(struct.tableName);
+           oprot.writeFieldEnd();
+         }
+         if (struct.partNames != null) {
+           oprot.writeFieldBegin(PART_NAMES_FIELD_DESC);
+           {
+             oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.partNames.size()));
 -            for (String _iter751 : struct.partNames)
++            for (String _iter759 : struct.partNames)
+             {
 -              oprot.writeString(_iter751);
++              oprot.writeString(_iter759);
+             }
+             oprot.writeListEnd();
+           }
+           oprot.writeFieldEnd();
+         }
+         oprot.writeFieldStop();
+         oprot.writeStructEnd();
+       }
+ 
+     }
+ 
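(Illustrative sketch, not part of this patch: the StandardScheme above writes every field with its id/type header, while the TupleScheme further below packs a bit set of "is set" flags followed by only the fields that are set. Assuming libthrift's TSerializer and the generated truncate_table_args class are on the classpath, the two encodings of the same struct can be compared roughly like this; all values are placeholders.)

    import java.util.Arrays;
    import org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.truncate_table_args;
    import org.apache.thrift.TSerializer;
    import org.apache.thrift.protocol.TBinaryProtocol;
    import org.apache.thrift.protocol.TTupleProtocol;

    public class TruncateArgsWireFormats {
      public static void main(String[] args) throws Exception {
        // Placeholder values; the fields mirror truncate_table_args (dbName, tableName, partNames).
        truncate_table_args a = new truncate_table_args("default", "table1",
            Arrays.asList("ds=2017-05-01"));
        // StandardScheme path: TBinaryProtocol writes a field header (id + type) before each field.
        byte[] standard = new TSerializer(new TBinaryProtocol.Factory()).serialize(a);
        // TupleScheme path: TTupleProtocol writes the optionals bit set, then only the set fields.
        byte[] tuple = new TSerializer(new TTupleProtocol.Factory()).serialize(a);
        System.out.println("standard=" + standard.length + " bytes, tuple=" + tuple.length + " bytes");
      }
    }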
+     private static class truncate_table_argsTupleSchemeFactory implements SchemeFactory {
+       public truncate_table_argsTupleScheme getScheme() {
+         return new truncate_table_argsTupleScheme();
+       }
+     }
+ 
+     private static class truncate_table_argsTupleScheme extends TupleScheme<truncate_table_args> {
+ 
+       @Override
+       public void write(org.apache.thrift.protocol.TProtocol prot, truncate_table_args struct) throws org.apache.thrift.TException {
+         TTupleProtocol oprot = (TTupleProtocol) prot;
+         BitSet optionals = new BitSet();
+         if (struct.isSetDbName()) {
+           optionals.set(0);
+         }
+         if (struct.isSetTableName()) {
+           optionals.set(1);
+         }
+         if (struct.isSetPartNames()) {
+           optionals.set(2);
+         }
+         oprot.writeBitSet(optionals, 3);
+         if (struct.isSetDbName()) {
+           oprot.writeString(struct.dbName);
+         }
+         if (struct.isSetTableName()) {
+           oprot.writeString(struct.tableName);
+         }
+         if (struct.isSetPartNames()) {
+           {
+             oprot.writeI32(struct.partNames.size());
 -            for (String _iter752 : struct.partNames)
++            for (String _iter760 : struct.partNames)
+             {
 -              oprot.writeString(_iter752);
++              oprot.writeString(_iter760);
+             }
+           }
+         }
+       }
+ 
+       @Override
+       public void read(org.apache.thrift.protocol.TProtocol prot, truncate_table_args struct) throws org.apache.thrift.TException {
+         TTupleProtocol iprot = (TTupleProtocol) prot;
+         BitSet incoming = iprot.readBitSet(3);
+         if (incoming.get(0)) {
+           struct.dbName = iprot.readString();
+           struct.setDbNameIsSet(true);
+         }
+         if (incoming.get(1)) {
+           struct.tableName = iprot.readString();
+           struct.setTableNameIsSet(true);
+         }
+         if (incoming.get(2)) {
+           {
 -            org.apache.thrift.protocol.TList _list753 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
 -            struct.partNames = new ArrayList<String>(_list753.size);
 -            String _elem754;
 -            for (int _i755 = 0; _i755 < _list753.size; ++_i755)
++            org.apache.thrift.protocol.TList _list761 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
++            struct.partNames = new ArrayList<String>(_list761.size);
++            String _elem762;
++            for (int _i763 = 0; _i763 < _list761.size; ++_i763)
+             {
 -              _elem754 = iprot.readString();
 -              struct.partNames.add(_elem754);
++              _elem762 = iprot.readString();
++              struct.partNames.add(_elem762);
+             }
+           }
+           struct.setPartNamesIsSet(true);
+         }
+       }
+     }
+ 
+   }
+ 
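(Illustrative sketch: the args/result plumbing above backs the new truncate_table call on the generated ThriftHiveMetastore.Client. Assuming an unsecured metastore listening on localhost:9083, a caller could drive it roughly as follows; the host/port, database, table, and partition names are placeholders.)

    import java.util.Arrays;
    import org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore;
    import org.apache.thrift.protocol.TBinaryProtocol;
    import org.apache.thrift.transport.TSocket;
    import org.apache.thrift.transport.TTransport;

    public class TruncateTableCallSketch {
      public static void main(String[] args) throws Exception {
        TTransport transport = new TSocket("localhost", 9083);  // assumed metastore endpoint
        transport.open();
        ThriftHiveMetastore.Client client =
            new ThriftHiveMetastore.Client(new TBinaryProtocol(transport));
        // Serializes truncate_table_args (dbName, tableName, partNames) over the wire and,
        // on failure, surfaces the MetaException carried back in truncate_table_result.o1.
        client.truncate_table("default", "table1", Arrays.asList("ds=2017-05-01"));
        transport.close();
      }
    }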
+   public static class truncate_table_result implements org.apache.thrift.TBase<truncate_table_result, truncate_table_result._Fields>, java.io.Serializable, Cloneable, Comparable<truncate_table_result>   {
+     private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("truncate_table_result");
+ 
+     private static final org.apache.thrift.protocol.TField O1_FIELD_DESC = new org.apache.thrift.protocol.TField("o1", org.apache.thrift.protocol.TType.STRUCT, (short)1);
+ 
+     private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+     static {
+       schemes.put(StandardScheme.class, new truncate_table_resultStandardSchemeFactory());
+       schemes.put(TupleScheme.class, new truncate_table_resultTupleSchemeFactory());
+     }
+ 
+     private MetaException o1; // required
+ 
+     /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+     public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+       O1((short)1, "o1");
+ 
+       private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+ 
+       static {
+         for (_Fields field : EnumSet.allOf(_Fields.class)) {
+           byName.put(field.getFieldName(), field);
+         }
+       }
+ 
+       /**
+        * Find the _Fields constant that matches fieldId, or null if it's not found.
+        */
+       public static _Fields findByThriftId(int fieldId) {
+         switch(fieldId) {
+           case 1: // O1
+             return O1;
+           default:
+             return null;
+         }
+       }
+ 
+       /**
+        * Find the _Fields constant that matches fieldId, throwing an exception
+        * if it is not found.
+        */
+       public static _Fields findByThriftIdOrThrow(int fieldId) {
+         _Fields fields = findByThriftId(fieldId);
+         if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+         return fields;
+       }
+ 
+       /**
+        * Find the _Fields constant that matches name, or null if it's not found.
+        */
+       public static _Fields findByName(String name) {
+         return byName.get(name);
+       }
+ 
+       private final short _thriftId;
+       private final String _fieldName;
+ 
+       _Fields(short thriftId, String fieldName) {
+         _thriftId = thriftId;
+         _fieldName = fieldName;
+       }
+ 
+       public short getThriftFieldId() {
+         return _thriftId;
+       }
+ 
+       public String getFieldName() {
+         return _fieldName;
+       }
+     }
+ 
+     // isset id assignments
+     public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+     static {
+       Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+       tmpMap.put(_Fields.O1, new org.apache.thrift.meta_data.FieldMetaData("o1", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+           new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT)));
+       metaDataMap = Collections.unmodifiableMap(tmpMap);
+       org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(truncate_table_result.class, metaDataMap);
+     }
+ 
+     public truncate_table_result() {
+     }
+ 
+     public truncate_table_result(
+       MetaException o1)
+     {
+       this();
+       this.o1 = o1;
+     }
+ 
+     /**
+      * Performs a deep copy on <i>other</i>.
+      */
+     public truncate_table_result(truncate_table_result other) {
+       if (other.isSetO1()) {
+         this.o1 = new MetaException(other.o1);
+       }
+     }
+ 
+     public truncate_table_result deepCopy() {
+       return new truncate_table_result(this);
+     }
+ 
+     @Override
+     public void clear() {
+       this.o1 = null;
+     }
+ 
+     public MetaException getO1() {
+       return this.o1;
+     }
+ 
+     public void setO1(MetaException o1) {
+       this.o1 = o1;
+     }
+ 
+     public void unsetO1() {
+       this.o1 = null;
+     }
+ 
+     /** Returns true if field o1 is set (has been assigned a value) and false otherwise */
+     public boolean isSetO1() {
+       return this.o1 != null;
+     }
+ 
+     public void setO1IsSet(boolean value) {
+       if (!value) {
+         this.o1 = null;
+       }
+     }
+ 
+     public void setFieldValue(_Fields field, Object value) {
+       switch (field) {
+       case O1:
+         if (value == null) {
+           unsetO1();
+         } else {
+           setO1((MetaException)value);
+         }
+         break;
+ 
+       }
+     }
+ 
+     public Object getFieldValue(_Fields field) {
+       switch (field) {
+       case O1:
+         return getO1();
+ 
+       }
+       throw new IllegalStateException();
+     }
+ 
+     /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+     public boolean isSet(_Fields field) {
+       if (field == null) {
+         throw new IllegalArgumentException();
+       }
+ 
+       switch (field) {
+       case O1:
+         return isSetO1();
+       }
+       throw new IllegalStateException();
+     }
+ 
+     @Override
+     public boolean equals(Object that) {
+       if (that == null)
+         return false;
+       if (that instanceof truncate_table_result)
+         return this.equals((truncate_table_result)that);
+       return false;
+     }
+ 
+     public boolean equals(truncate_table_result that) {
+       if (that == null)
+         return false;
+ 
+       boolean this_present_o1 = true && this.isSetO1();
+       boolean that_present_o1 = true && that.isSetO1();
+       if (this_present_o1 || that_present_o1) {
+         if (!(this_present_o1 && that_present_o1))
+           return false;
+         if (!this.o1.equals(that.o1))
+           return false;
+       }
+ 
+       return true;
+     }
+ 
+     @Override
+     public int hashCode() {
+       List<Object> list = new ArrayList<Object>();
+ 
+       boolean present_o1 = true && (isSetO1());
+       list.add(present_o1);
+       if (present_o1)
+         list.add(o1);
+ 
+       return list.hashCode();
+     }
+ 
+     @Override
+     public int compareTo(truncate_table_result other) {
+       if (!getClass().equals(other.getClass())) {
+         return getClass().getName().compareTo(other.getClass().getName());
+       }
+ 
+       int lastComparison = 0;
+ 
+       lastComparison = Boolean.valueOf(isSetO1()).compareTo(other.isSetO1());
+       if (lastComparison != 0) {
+         return lastComparison;
+       }
+       if (isSetO1()) {
+         lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.o1, other.o1);
+         if (lastComparison != 0) {
+           return lastComparison;
+         }
+       }
+       return 0;
+     }
+ 
+     public _Fields fieldForId(int fieldId) {
+       return _Fields.findByThriftId(fieldId);
+     }
+ 
+     public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+       schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+     }
+ 
+     public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+       schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+     }
+ 
+     @Override
+     public String toString() {
+       StringBuilder sb = new StringBuilder("truncate_table_result(");
        boolean first = true;
  
        sb.append("o1:");
@@@ -49586,13 -50182,13 +50710,13 @@@
              case 0: // SUCCESS
                if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
                  {
--                  org.apache.thrift.protocol.TList _list756 = iprot.readListBegin();
--                  struct.success = new ArrayList<String>(_list756.size);
--                  String _elem757;
--                  for (int _i758 = 0; _i758 < _list756.size; ++_i758)
++                  org.apache.thrift.protocol.TList _list764 = iprot.readListBegin();
++                  struct.success = new ArrayList<String>(_list764.size);
++                  String _elem765;
++                  for (int _i766 = 0; _i766 < _list764.size; ++_i766)
                    {
--                    _elem757 = iprot.readString();
--                    struct.success.add(_elem757);
++                    _elem765 = iprot.readString();
++                    struct.success.add(_elem765);
                    }
                    iprot.readListEnd();
                  }
@@@ -49627,9 -50223,9 +50751,9 @@@
            oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
            {
              oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size()));
--            for (String _iter759 : struct.success)
++            for (String _iter767 : struct.success)
              {
--              oprot.writeString(_iter759);
++              oprot.writeString(_iter767);
              }
              oprot.writeListEnd();
            }
@@@ -49668,9 -50264,9 +50792,9 @@@
          if (struct.isSetSuccess()) {
            {
              oprot.writeI32(struct.success.size());
--            for (String _iter760 : struct.success)
++            for (String _iter768 : struct.success)
              {
--              oprot.writeString(_iter760);
++              oprot.writeString(_iter768);
              }
            }
          }
@@@ -49685,13 -50281,13 +50809,13 @@@
          BitSet incoming = iprot.readBitSet(2);
          if (incoming.get(0)) {
            {
--            org.apache.thrift.protocol.TList _list761 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
--            struct.success = new ArrayList<String>(_list761.size);
--            String _elem762;
--            for (int _i763 = 0; _i763 < _list761.size; ++_i763)
++            org.apache.thrift.protocol.TList _list769 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
++            struct.success = new ArrayList<String>(_list769.size);
++            String _elem770;
++            for (int _i771 = 0; _i771 < _list769.size; ++_i771)
              {
--              _elem762 = iprot.readString();
--              struct.success.add(_elem762);
++              _elem770 = iprot.readString();
++              struct.success.add(_elem770);
              }
            }
            struct.setSuccessIsSet(true);
@@@ -50665,13 -51261,13 +51789,13 @@@
              case 0: // SUCCESS
                if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
                  {
--                  org.apache.thrift.protocol.TList _list764 = iprot.readListBegin();
--                  struct.success = new ArrayList<String>(_list764.size);
--                  String _elem765;
--                  for (int _i766 = 0; _i766 < _list764.size; ++_i766)
++                  org.apache.thrift.protocol.TList _list772 = iprot.readListBegin();
++                  struct.success = new ArrayList<String>(_list772.size);
++                  String _elem773;
++                  for (int _i774 = 0; _i774 < _list772.size; ++_i774)
                    {
--                    _elem765 = iprot.readString();
--                    struct.success.add(_elem765);
++                    _elem773 = iprot.readString();
++                    struct.success.add(_elem773);
                    }
                    iprot.readListEnd();
                  }
@@@ -50706,9 -51302,9 +51830,9 @@@
            oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
            {
              oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size()));
--            for (String _iter767 : struct.success)
++            for (String _iter775 : struct.success)
              {
--              oprot.writeString(_iter767);
++              oprot.writeString(_iter775);
              }
              oprot.writeListEnd();
            }
@@@ -50747,9 -51343,9 +51871,9 @@@
          if (struct.isSetSuccess()) {
            {
              oprot.writeI32(struct.success.size());
--            for (String _iter768 : struct.success)
++            for (String _iter776 : struct.success)
              {
--              oprot.writeString(_iter768);
++              oprot.writeString(_iter776);
              }
            }
          }
@@@ -50764,13 -51360,13 +51888,13 @@@
          BitSet incoming = iprot.readBitSet(2);
          if (incoming.get(0)) {
            {
--            org.apache.thrift.protocol.TList _list769 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
--            struct.success = new ArrayList<String>(_list769.size);
--            String _elem770;
--            for (int _i771 = 0; _i771 < _list769.size; ++_i771)
++            org.apache.thrift.protocol.TList _list777 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
++            struct.success = new ArrayList<String>(_list777.size);
++            String _elem778;
++            for (int _i779 = 0; _i779 < _list777.size; ++_i779)
              {
--              _elem770 = iprot.readString();
--              struct.success.add(_elem770);
++              _elem778 = iprot.readString();
++              struct.success.add(_elem778);
              }
            }
            struct.setSuccessIsSet(true);
@@@ -51275,13 -51871,13 +52399,13 @@@
              case 3: // TBL_TYPES
                if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
                  {
--                  org.apache.thrift.protocol.TList _list772 = iprot.readListBegin();
--                  struct.tbl_types = new ArrayList<String>(_list772.size);
--                  String _elem773;
--                  for (int _i774 = 0; _i774 < _list772.size; ++_i774)
++                  org.apache.thrift.protocol.TList _list780 = iprot.readListBegin();
++                  struct.tbl_types = new ArrayList<String>(_list780.size);
++                  String _elem781;
++                  for (int _i782 = 0; _i782 < _list780.size; ++_i782)
                    {
--                    _elem773 = iprot.readString();
--                    struct.tbl_types.add(_elem773);
++                    _elem781 = iprot.readString();
++                    struct.tbl_types.add(_elem781);
                    }
                    iprot.readListEnd();
                  }
@@@ -51317,9 -51913,9 +52441,9 @@@
            oprot.writeFieldBegin(TBL_TYPES_FIELD_DESC);
            {
              oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.tbl_types.size()));
--            for (String _iter775 : struct.tbl_types)
++            for (String _iter783 : struct.tbl_types)
              {
--              oprot.writeString(_iter775);
++              oprot.writeString(_iter783);
              }
              oprot.writeListEnd();
            }
@@@ -51362,9 -51958,9 +52486,9 @@@
          if (struct.isSetTbl_types()) {
            {
              oprot.writeI32(struct.tbl_types.size());
--            for (String _iter776 : struct.tbl_types)
++            for (String _iter784 : struct.tbl_types)
              {
--              oprot.writeString(_iter776);
++              oprot.writeString(_iter784);
              }
            }
          }
@@@ -51384,13 -51980,13 +52508,13 @@@
          }
          if (incoming.get(2)) {
            {
--            org.apache.thrift.protocol.TList _list777 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
--            struct.tbl_types = new ArrayList<String>(_list777.size);
--            String _elem778;
--            for (int _i779 = 0; _i779 < _list777.size; ++_i779)
++            org.apache.thrift.protocol.TList _list785 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
++            struct.tbl_types = new ArrayList<String>(_list785.size);
++            String _elem786;
++            for (int _i787 = 0; _i787 < _list785.size; ++_i787)
              {
--              _elem778 = iprot.readString();
--              struct.tbl_types.add(_elem778);
++              _elem786 = iprot.readString();
++              struct.tbl_types.add(_elem786);
              }
            }
            struct.setTbl_typesIsSet(true);
@@@ -51796,14 -52392,14 +52920,14 @@@
              case 0: // SUCCESS
                if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
                  {
--                  org.apache.thrift.protocol.TList _list780 = iprot.readListBegin();
--                  struct.success = new ArrayList<TableMeta>(_list780.size);
--                  TableMeta _elem781;
--                  for (int _i782 = 0; _i782 < _list780.size; ++_i782)
++                  org.apache.thrift.protocol.TList _list788 = iprot.readListBegin();
++                  struct.success = new ArrayList<TableMeta>(_list788.size);
++                  TableMeta _elem789;
++                  for (int _i790 = 0; _i790 < _list788.size; ++_i790)
                    {
--                    _elem781 = new TableMeta();
--                    _elem781.read(iprot);
--                    struct.success.add(_elem781);
++                    _elem789 = new TableMeta();
++                    _elem789.read(iprot);
++                    struct.success.add(_elem789);
                    }
                    iprot.readListEnd();
                  }
@@@ -51838,9 -52434,9 +52962,9 @@@
            oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
            {
              oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size()));
--            for (TableMeta _iter783 : struct.success)
++            for (TableMeta _iter791 : struct.success)
              {
--              _iter783.write(oprot);
++              _iter791.write(oprot);
              }
              oprot.writeListEnd();
            }
@@@ -51879,9 -52475,9 +53003,9 @@@
          if (struct.isSetSuccess()) {
            {
              oprot.writeI32(struct.success.size());
--            for (TableMeta _iter784 : struct.success)
++            for (TableMeta _iter792 : struct.success)
              {
--              _iter784.write(oprot);
++              _iter792.write(oprot);
              }
            }
          }
@@@ -51896,14 -52492,14 +53020,14 @@@
          BitSet incoming = iprot.readBitSet(2);
          if (incoming.get(0)) {
            {
--            org.apache.thrift.protocol.TList _list785 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
--            struct.success = new ArrayList<TableMeta>(_list785.size);
--            TableMeta _elem786;
--            for (int _i787 = 0; _i787 < _list785.size; ++_i787)
++            org.apache.thrift.protocol.TList _list793 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
++            struct.success = new ArrayList<TableMeta>(_list793.size);
++            TableMeta _elem794;
++            for (int _i795 = 0; _i795 < _list793.size; ++_i795)
              {
--              _elem786 = new TableMeta();
--              _elem786.read(iprot);
--              struct.success.add(_elem786);
++              _elem794 = new TableMeta();
++              _elem794.read(iprot);
++              struct.success.add(_elem794);
              }
            }
            struct.setSuccessIsSet(true);
@@@ -52669,13 -53265,13 +53793,13 @@@
              case 0: // SUCCESS
                if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
                  {
--                  org.apache.thrift.protocol.TList _list788 = iprot.readListBegin();
--                  struct.success = new ArrayList<String>(_list788.size);
--                  String _elem789;
--                  for (int _i790 = 0; _i790 < _list788.size; ++_i790)
++                  org.apache.thrift.protocol.TList _list796 = iprot.readListBegin();
++                  struct.success = new ArrayList<String>(_list796.size);
++                  String _elem797;
++                  for (int _i798 = 0; _i798 < _list796.size; ++_i798)
                    {
--                    _elem789 = iprot.readString();
--                    struct.success.add(_elem789);
++                    _elem797 = iprot.readString();
++                    struct.success.add(_elem797);
                    }
                    iprot.readListEnd();
                  }
@@@ -52710,9 -53306,9 +53834,9 @@@
            oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
            {
              oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size()));
--            for (String _iter791 : struct.success)
++            for (String _iter799 : struct.success)
              {
--              oprot.writeString(_iter791);
++              oprot.writeString(_iter799);
              }
              oprot.writeListEnd();
            }
@@@ -52751,9 -53347,9 +53875,9 @@@
          if (struct.isSetSuccess()) {
            {
              oprot.writeI32(struct.success.size());
--            for (String _iter792 : struct.success)
++            for (String _iter800 : struct.success)
              {
--              oprot.writeString(_iter792);
++              oprot.writeString(_iter800);
              }
            }
          }
@@@ -52768,13 -53364,13 +53892,13 @@@
          BitSet incoming = iprot.readBitSet(2);
          if (incoming.get(0)) {
            {
--            org.apache.thrift.protocol.TList _list793 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
--            struct.success = new ArrayList<String>(_list793.size);
--            String _elem794;
--            for (int _i795 = 0; _i795 < _list793.size; ++_i795)
++            org.apache.thrift.protocol.TList _list801 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
++            struct.success = new ArrayList<String>(_list801.size);
++            String _elem802;
++            for (int _i803 = 0; _i803 < _list801.size; ++_i803)
              {
--              _elem794 = iprot.readString();
--              struct.success.add(_elem794);
++              _elem802 = iprot.readString();
++              struct.success.add(_elem802);
              }
            }
            struct.setSuccessIsSet(true);
@@@ -54227,13 -54823,13 +55351,13 @@@
              case 2: // TBL_NAMES
                if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
                  {
--                  org.apache.thrift.protocol.TList _list796 = iprot.readListBegin();
--                  struct.tbl_names = new ArrayList<String>(_list796.size);
--                  String _elem797;
--                  for (int _i798 = 0; _i798 < _list796.size; ++_i798)
++                  org.apache.thrift.protocol.TList _list804 = iprot.readListBegin();
++                  struct.tbl_names = new ArrayList<String>(_list804.size);
++                  String _elem805;
++                  for (int _i806 = 0; _i806 < _list804.size; ++_i806)
                    {
--                    _elem797 = iprot.readString();
--                    struct.tbl_names.add(_elem797);
++                    _elem805 = iprot.readString();
++                    struct.tbl_names.add(_elem805);
                    }
                    iprot.readListEnd();
                  }
@@@ -54264,9 -54860,9 +55388,9 @@@
            oprot.writeFieldBegin(TBL_NAMES_FIELD_DESC);
            {
              oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.tbl_names.size()));
--            for (String _iter799 : struct.tbl_names)
++            for (String _iter807 : struct.tbl_names)
              {
--              oprot.writeString(_iter799);
++              oprot.writeString(_iter807);
              }
              oprot.writeListEnd();
            }
@@@ -54303,9 -54899,9 +55427,9 @@@
          if (struct.isSetTbl_names()) {
            {
              oprot.writeI32(struct.tbl_names.size());
--            for (String _iter800 : struct.tbl_names)
++            for (String _iter808 : struct.tbl_names)
              {
--              oprot.writeString(_iter800);
++              oprot.writeString(_iter808);
              }
            }
          }
@@@ -54321,13 -54917,13 +55445,13 @@@
          }
          if (incoming.get(1)) {
            {
--            org.apache.thrift.protocol.TList _list801 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
--            struct.tbl_names = new ArrayList<String>(_list801.size);
--            String _elem802;
--            for (int _i803 = 0; _i803 < _list801.size; ++_i803)
++            org.apache.thrift.protocol.TList _list809 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
++            struct.tbl_names = new ArrayList<String>(_list809.size);
++            String _elem810;
++            for (int _i811 = 0; _i811 < _list809.size; ++_i811)
              {
--              _elem802 = iprot.readString();
--              struct.tbl_names.add(_elem802);
++              _elem810 = iprot.readString();
++              struct.tbl_names.add(_elem810);
              }
            }
            struct.setTbl_namesIsSet(true);
@@@ -54652,14 -55248,14 +55776,14 @@@
              case 0: // SUCCESS
                if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
                  {
--                  org.apache.thrift.protocol.TList _list804 = iprot.readListBegin();
--                  struct.success = new ArrayList<Table>(_list804.size);
--                  Table _elem805;
--                  for (int _i806 = 0; _i806 < _list804.size; ++_i806)
++                  org.apache.thrift.protocol.TList _list812 = iprot.readListBegin();
++                  struct.success = new ArrayList<Table>(_list812.size);
++                  Table _elem813;
++                  for (int _i814 = 0; _i814 < _list812.size; ++_i814)
                    {
--                    _elem805 = new Table();
--                    _elem805.read(iprot);
--                    struct.success.add(_elem805);
++                    _elem813 = new Table();
++                    _elem813.read(iprot);
++                    struct.success.add(_elem813);
                    }
                    iprot.readListEnd();
                  }
@@@ -54685,9 -55281,9 +55809,9 @@@
            oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
            {
              oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size()));
--            for (Table _iter807 : struct.success)
++            for (Table _iter815 : struct.success)
              {
--              _iter807.write(oprot);
++              _iter815.write(oprot);
              }
              oprot.writeListEnd();
            }
@@@ -54718,9 -55314,9 +55842,9 @@@
          if (struct.isSetSuccess()) {
            {
              oprot.writeI32(struct.success.size());
--            for (Table _iter808 : struct.success)
++            for (Table _iter816 : struct.success)
              {
--              _iter808.write(oprot);
++              _iter816.write(oprot);
              }
            }
          }
@@@ -54732,14 -55328,14 +55856,14 @@@
          BitSet incoming = iprot.readBitSet(1);
          if (incoming.get(0)) {
            {
--            org.apache.thrift.protocol.TList _list809 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
--            struct.success = new ArrayList<Table>(_list809.size);
--            Table _elem810;
--            for (int _i811 = 0; _i811 < _list809.size; ++_i811)
++            org.apache.thrift.protocol.TList _list817 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
++            struct.success = new ArrayList<Table>(_list817.size);
++            Table _elem818;
++            for (int _i819 = 0; _i819 < _list817.size; ++_i819)
              {
--              _elem810 = new Table();
--              _elem810.read(iprot);
--              struct.success.add(_elem810);
++              _elem818 = new Table();
++              _elem818.read(iprot);
++              struct.success.add(_elem818);
              }
            }
            struct.setSuccessIsSet(true);
@@@ -57852,13 -58448,13 +58976,13 @@@
              case 0: // SUCCESS
                if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
                  {
--                  org.apache.thrift.protocol.TList _list812 = iprot.readListBegin();
--                  struct.success = new ArrayList<String>(_list812.size);
--                  String _elem813;
--                  for (int _i814 = 0; _i814 < _list812.size; ++_i814)
++                  org.apache.thrift.protocol.TList _list820 = iprot.readListBegin();
++                  struct.success = new ArrayList<String>(_list820.size);
++                  String _elem821;
++                  for (int _i822 = 0; _i822 < _list820.size; ++_i822)
                    {
--                    _elem813 = iprot.readString();
--                    struct.success.add(_elem813);
++                    _elem821 = iprot.readString();
++                    struct.success.add(_elem821);
                    }
                    iprot.readListEnd();
                  }
@@@ -57911,9 -58507,9 +59035,9 @@@
            oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
            {
              oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size()));
--            for (String _iter815 : struct.success)
++            for (String _iter823 : struct.success)
              {
--              oprot.writeString(_iter815);
++              oprot.writeString(_iter823);
              }
              oprot.writeListEnd();
            }
@@@ -57968,9 -58564,9 +59092,9 @@@
          if (struct.isSetSuccess()) {
            {
              oprot.writeI32(struct.success.size());
--            for (String _iter816 : struct.success)
++            for (String _iter824 : struct.success)
              {
--              oprot.writeString(_iter816);
++              oprot.writeString(_iter824);
              }
            }
          }
@@@ -57991,13 -58587,13 +59115,13 @@@
          BitSet incoming = iprot.readBitSet(4);
          if (incoming.get(0)) {
            {
--            org.apache.thrift.protocol.TList _list817 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
--            struct.success = new ArrayList<String>(_list817.size);
--            String _elem818;
--            for (int _i819 = 0; _i819 < _list817.size; ++_i819)
++            org.apache.thrift.protocol.TList _list825 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
++            struct.success = new ArrayList<String>(_list825.size);
++            String _elem826;
++            for (int _i827 = 0; _i827 < _list825.size; ++_i827)
              {
--              _elem818 = iprot.readString();
--              struct.success.add(_elem818);
++              _elem826 = iprot.readString();
++              struct.success.add(_elem826);
              }
            }
            struct.setSuccessIsSet(true);
@@@ -63856,14 -64452,14 +64980,14 @@@
              case 1: // NEW_PARTS
                if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
                  {
--                  org.apache.thrift.protocol.TList _list820 = iprot.readListBegin();
--                  struct.new_parts = new ArrayList<Partition>(_list820.size);
--                  Partition _elem821;
--                  for (int _i822 = 0; _i822 < _list820.size; ++_i822)
++                  org.apache.thrift.protocol.TList _list828 = iprot.readListBegin();
++                  struct.new_parts = new ArrayList<Partition>(_list828.size);
++                  Partition _elem829;
++                  for (int _i830 = 0; _i830 < _list828.size; ++_i830)
                    {
--                    _elem821 = new Partition();
--                    _elem821.read(iprot);
--                    struct.new_parts.add(_elem821);
++                    _elem829 = new Partition();
++                    _elem829.read(iprot);
++                    struct.new_parts.add(_elem829);
                    }
                    iprot.readListEnd();
                  }
@@@ -63889,9 -64485,9 +65013,9 @@@
            oprot.writeFieldBegin(NEW_PARTS_FIELD_DESC);
            {
              oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.new_parts.size()));
--            for (Partition _iter823 : struct.new_parts)
++            for (Partition _iter831 : struct.new_parts)
              {
--              _iter823.write(oprot);
++              _iter831.write(oprot);
              }
              oprot.writeListEnd();
            }
@@@ -63922,9 -64518,9 +65046,9 @@@
          if (struct.isSetNew_parts()) {
            {
              oprot.writeI32(struct.new_parts.size());
--            for (Partition _iter824 : struct.new_parts)
++            for (Partition _iter832 : struct.new_parts)
              {
--              _iter824.write(oprot);
++              _iter832.write(oprot);
              }
            }
          }
@@@ -63936,14 -64532,14 +65060,14 @@@
          BitSet incoming = iprot.readBitSet(1);
          if (incoming.get(0)) {
            {
--            org.apache.thrift.protocol.TList _list825 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
--            struct.new_parts = new ArrayList<Partition>(_list825.size);
--            Partition _elem826;
--            for (int _i827 = 0; _i827 < _list825.size; ++_i827)
++            org.apache.thrift.protocol.TList _list833 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
++            struct.new_parts = new ArrayList<Partition>(_list833.size);
++            Partition _elem834;
++            for (int _i835 = 0; _i835 < _list833.size; ++_i835)
              {
--              _elem826 = new Partition();
--              _elem826.read(iprot);
--              struct.new_parts.add(_elem826);
++              _elem834 = new Partition();
++              _elem834.read(iprot);
++              struct.new_parts.add(_elem834);
              }
            }
            struct.setNew_partsIsSet(true);
@@@ -64944,14 -65540,14 +66068,14 @@@
              case 1: // NEW_PARTS
                if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
                  {
--                  org.apache.thrift.protocol.TList _list828 = iprot.readListBegin();
--                  struct.new_parts = new ArrayList<PartitionSpec>(_list828.size);
--                  PartitionSpec _elem829;
--                  for (int _i830 = 0; _i830 < _list828.size; ++_i830)
++                  org.apache.thrift.protocol.TList _list836 = iprot.readListBegin();
++                  struct.new_parts = new ArrayList<PartitionSpec>(_list836.size);
++                  PartitionSpec _elem837;
++                  for (int _i838 = 0; _i838 < _list836.size; ++_i838)
                    {
--                    _elem829 = new PartitionSpec();
--                    _elem829.read(iprot);
--                    struct.new_parts.add(_elem829);
++                    _elem837 = new PartitionSpec();
++                    _elem837.read(iprot);
++                    struct.new_parts.add(_elem837);
                    }
                    iprot.readListEnd();
                  }
@@@ -64977,9 -65573,9 +66101,9 @@@
            oprot.writeFieldBegin(NEW_PARTS_FIELD_DESC);
            {
              oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.new_parts.size()));
--            for (PartitionSpec _iter831 : struct.new_parts)
++            for (PartitionSpec _iter839 : struct.new_parts)
              {
--              _iter831.write(oprot);
++              _iter839.write(oprot);
              }
              oprot.writeListEnd();
            }
@@@ -65010,9 -65606,9 +66134,9 @@@
          if (struct.isSetNew_parts()) {
            {
              oprot.writeI32(struct.new_parts.size());
--            for (PartitionSpec _iter832 : struct.new_parts)
++            for (PartitionSpec _iter840 : struct.new_parts)
              {
--              _iter832.write(oprot);
++              _iter840.write(oprot);
              }
            }
          }
@@@ -65024,14 -65620,14 +66148,14 @@@
          BitSet incoming = iprot.readBitSet(1);
          if (incoming.get(0)) {
            {
--            org.apache.thrift.protocol.TList _list833 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
--            struct.new_parts = new ArrayList<PartitionSpec>(_list833.size);
--            PartitionSpec _elem834;
--            for (int _i835 = 0; _i835 < _list833.size; ++_i835)
++            org.apache.thrift.protocol.TList _list841 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
++            struct.new_parts = new ArrayList<PartitionSpec>(_list841.size);
++            PartitionSpec _elem842;
++            for (int _i843 = 0; _i843 < _list841.size; ++_i843)
              {
--              _elem834 = new PartitionSpec();
--              _elem834.read(iprot);
--              struct.new_parts.add(_elem834);
++              _elem842 = new PartitionSpec();
++              _elem842.read(iprot);
++              struct.new_parts.add(_elem842);
              }
            }
            struct.setNew_partsIsSet(true);
@@@ -66207,13 -66803,13 +67331,13 @@@
              case 3: // PART_VALS
                if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
                  {
--                  org.apache.thrift.protocol.TList _list836 = iprot.readListBegin();
--                  struct.part_vals = new ArrayList<String>(_list836.size);
--                  String _elem837;
--                  for (int _i838 = 0; _i838 < _list836.size; ++_i838)
++                  org.apache.thrift.protocol.TList _list844 = iprot.readListBegin();
++                  struct.part_vals = new ArrayList<String>(_list844.size);
++                  String _elem845;
++                  for (int _i846 = 0; _i846 < _list844.size; ++_i846)
                    {
--                    _elem837 = iprot.readString();
--                    struct.part_vals.add(_elem837);
++                    _elem845 = iprot.readString();
++                    struct.part_vals.add(_elem845);
                    }
                    iprot.readListEnd();
                  }
@@@ -66249,9 -66845,9 +67373,9 @@@
            oprot.writeFieldBegin(PART_VALS_FIELD_DESC);
            {
              oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size()));
--            for (String _iter839 : struct.part_vals)
++            for (String _iter847 : struct.part_vals)
              {
--              oprot.writeString(_iter839);
++              oprot.writeString(_iter847);
              }
              oprot.writeListEnd();
            }
@@@ -66294,9 -66890,9 +67418,9 @@@
          if (struct.isSetPart_vals()) {
            {
              oprot.writeI32(struct.part_vals.size());
--            for (String _iter840 : struct.part_vals)
++            for (String _iter848 : struct.part_vals)
              {
--              oprot.writeString(_iter840);
++              oprot.writeString(_iter848);
              }
            }
          }
@@@ -66316,13 -66912,13 +67440,13 @@@
          }
          if (incoming.get(2)) {
            {
--            org.apache.thrift.protocol.TList _list841 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
--            struct.part_vals = new ArrayList<String>(_list841.size);
--            String _elem842;
--            for (int _i843 = 0; _i843 < _list841.size; ++_i843)
++            org.apache.thrift.protocol.TList _list849 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
++            struct.part_vals = new ArrayList<String>(_list849.size);
++            String _elem850;
++            for (int _i851 = 0; _i851 < _list849.size; ++_i851)
              {
--              _elem842 = iprot.readString();
--              struct.part_vals.add(_elem842);
++              _elem850 = iprot.readString();
++              struct.part_vals.add(_elem850);
              }
            }
            struct.setPart_valsIsSet(true);
@@@ -68631,13 -69227,13 +69755,13 @@@
              case 3: // PART_VALS
                if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
                  {
--                  org.apache.thrift.protocol.TList _list844 = iprot.readListBegin();
--                  struct.part_vals = new ArrayList<String>(_list844.size);
--                  String _elem845;
--                  for (int _i846 = 0; _i846 < _list844.size; ++_i846)
++                  org.apache.thrift.protocol.TList _list852 = iprot.readListBegin();
++                  struct.part_vals = new ArrayList<String>(_list852.size);
++                  String _elem853;
++                  for (int _i854 = 0; _i854 < _list852.size; ++_i854)
                    {
--                    _elem845 = iprot.readString();
--                    struct.part_vals.add(_elem845);
++                    _elem853 = iprot.readString();
++                    struct.part_vals.add(_elem853);
                    }
                    iprot.readListEnd();
                  }
@@@ -68682,9 -69278,9 +69806,9 @@@
            oprot.writeFieldBegin(PART_VALS_FIELD_DESC);
            {
              oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size()));
--            for (String _iter847 : struct.part_vals)
++            for (String _iter855 : struct.part_vals)
              {
--              oprot.writeString(_iter847);
++              oprot.writeString(_iter855);
              }
              oprot.writeListEnd();
            }
@@@ -68735,9 -69331,9 +69859,9 @@@
          if (struct.isSetPart_vals()) {
            {
              oprot.writeI32(struct.part_vals.size());
--            for (String _iter848 : struct.part_vals)
++            for (String _iter856 : struct.part_vals)
              {
--              oprot.writeString(_iter848);
++              oprot.writeString(_iter856);
              }
            }
          }
@@@ -68760,13 -69356,13 +69884,13 @@@
          }
          if (incoming.get(2)) {
            {
--            org.apache.thrift.protocol.TList _list849 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
--            struct.part_vals = new ArrayList<String>(_list849.size);
--            String _elem850;
--            for (int _i851 = 0; _i851 < _list849.size; ++_i851)
++            org.apache.thrift.protocol.TList _list857 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
++            struct.part_vals = new ArrayList<String>(_list857.size);
++            String _elem858;
++            for (int _i859 = 0; _i859 < _list857.size; ++_i859)
              {
--              _elem850 = iprot.readString();
--              struct.part_vals.add(_elem850);
++              _elem858 = iprot.readString();
++              struct.part_vals.add(_elem858);
              }
            }
            struct.setPart_valsIsSet(true);
@@@ -72636,13 -73232,13 +73760,13 @@@
              case 3: // PART_VALS
                if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
                  {
--                  org.apache.thrift.protocol.TList _list852 = iprot.readListBegin();
--                  struct.part_vals = new ArrayList<String>(_list852.size);
--                  String _elem853;
--                  for (int _i854 = 0; _i854 < _list852.size; ++_i854)
++                  org.apache.thrift.protocol.TList _list860 = iprot.readListBegin();
++                  struct.part_vals = new ArrayList<String>(_list860.size);
++                  String _elem861;
++                  for (int _i862 = 0; _i862 < _list860.size; ++_i862)
                    {
--                    _elem853 = iprot.readString();
--                    struct.part_vals.add(_elem853);
++                    _elem861 = iprot.readString();
++                    struct.part_vals.add(_elem861);
                    }
                    iprot.readListEnd();
                  }
@@@ -72686,9 -73282,9 +73810,9 @@@
            oprot.writeFieldBegin(PART_VALS_FIELD_DESC);
            {
              oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size()));
--            for (String _iter855 : struct.part_vals)
++            for (String _iter863 : struct.part_vals)
              {
--              oprot.writeString(_iter855);
++              oprot.writeString(_iter863);
              }
              oprot.writeListEnd();
            }
@@@ -72737,9 -73333,9 +73861,9 @@@
          if (struct.isSetPart_vals()) {
            {
              oprot.writeI32(struct.part_vals.size());
--            for (String _iter856 : struct.part_vals)
++            for (String _iter864 : struct.part_vals)
              {
--              oprot.writeString(_iter856);
++              oprot.writeString(_iter864);
              }
            }
          }
@@@ -72762,13 -73358,13 +73886,13 @@@
          }
          if (incoming.get(2)) {
            {
--            org.apache.thrift.protocol.TList _list857 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
--            struct.part_vals = new ArrayList<String>(_list857.size);
--            String _elem858;
--            for (int _i859 = 0; _i859 < _list857.size; ++_i859)
++            org.apache.thrift.protocol.TList _list865 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
++            struct.part_vals = new ArrayList<String>(_list865.size);
++            String _elem866;
++            for (int _i867 = 0; _i867 < _list865.size; ++_i867)
              {
--              _elem858 = iprot.readString();
--              struct.part_vals.add(_elem858);
++              _elem866 = iprot.readString();
++              struct.part_vals.add(_elem866);
              }
            }
            struct.setPart_valsIsSet(true);
@@@ -74007,13 -74603,13 +75131,13 @@@
              case 3: // PART_VALS
                if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
                  {
--                  org.apache.thrift.protocol.TList _list860 = iprot.readListBegin();
--                  struct.part_vals = new ArrayList<String>(_list860.size);
--                  String _elem861;
--                  for (int _i862 = 0; _i862 < _list860.size; ++_i862)
++                  org.apache.thrift.protocol.TList _list868 = iprot.readListBegin();
++                  struct.part_vals = new ArrayList<String>(_list868.size);
++                  String _elem869;
++                  for (int _i870 = 0; _i870 < _list868.size; ++_i870)
                    {
--                    _elem861 = iprot.readString();
--                    struct.part_vals.add(_elem861);
++                    _elem869 = iprot.readString();
++                    struct.part_vals.add(_elem869);
                    }
                    iprot.readListEnd();
                  }
@@@ -74066,9 -74662,9 +75190,9 @@@
            oprot.writeFieldBegin(PART_VALS_FIELD_DESC);
            {
              oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size()));
--            for (String _iter863 : struct.part_vals)
++            for (String _iter871 : struct.part_vals)
              {
--              oprot.writeString(_iter863);
++              oprot.writeString(_iter871);
              }
              oprot.writeListEnd();
            }
@@@ -74125,9 -74721,9 +75249,9 @@@
          if (struct.isSetPart_vals()) {
            {
              oprot.writeI32(struct.part_vals.size());
--            for (String _iter864 : struct.part_vals)
++            for (String _iter872 : struct.part_vals)
              {
--              oprot.writeString(_iter864);
++              oprot.writeString(_iter872);
              }
            }
          }
@@@ -74153,13 -74749,13 +75277,13 @@@
          }
          if (incoming.get(2)) {
            {
--            org.apache.thrift.protocol.TList _list865 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
--            struct.part_vals = new ArrayList<String>(_list865.size);
--            String _elem866;
--            for (int _i867 = 0; _i867 < _list865.size; ++_i867)
++            org.apache.thrift.protocol.TList _list873 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
++            struct.part_vals = new ArrayList<String>(_list873.size);
++            String _elem874;
++            for (int _i875 = 0; _i875 < _list873.size; ++_i875)
              {
--              _elem866 = iprot.readString();
--              struct.part_vals.add(_elem866);
++              _elem874 = iprot.readString();
++              struct.part_vals.add(_elem874);
              }
            }
            struct.setPart_valsIsSet(true);
@@@ -78761,13 -79357,13 +79885,13 @@@
              case 3: // PART_VALS
                if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
                  {
--                  org.apache.thrift.protocol.TList _list868 = iprot.readListBegin();
--                  struct.part_vals = new ArrayList<String>(_list868.size);
--                  String _elem869;
--                  for (int _i870 = 0; _i870 < _list868.size; ++_i870)
++                  org.apache.thrift.protocol.TList _list876 = iprot.readListBegin();
++                  struct.part_vals = new ArrayList<String>(_list876.size);
++                  String _elem877;
++                  for (int _i878 = 0; _i878 < _list876.size; ++_i878)
                    {
--                    _elem869 = iprot.readString();
--                    struct.part_vals.add(_elem869);
++                    _elem877 = iprot.readString();
++                    struct.part_vals.add(_elem877);
                    }
                    iprot.readListEnd();
                  }
@@@ -78803,9 -79399,9 +79927,9 @@@
            oprot.writeFieldBegin(PART_VALS_FIELD_DESC);
            {
              oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size()));
--            for (String _iter871 : struct.part_vals)
++            for (String _iter879 : struct.part_vals)
              {
--              oprot.writeString(_iter871);
++              oprot.writeString(_iter879);
              }
              oprot.writeListEnd();
            }
@@@ -78848,9 -79444,9 +79972,9 @@@
          if (struct.isSetPart_vals()) {
            {
              oprot.writeI32(struct.part_vals.size());
--            for (String _iter872 : struct.part_vals)
++            for (String _iter880 : struct.part_vals)
              {
--              oprot.writeString(_iter872);
++              oprot.writeString(_iter880);
              }
            }
          }
@@@ -78870,13 -79466,13 +79994,13 @@@
          }
          if (incoming.get(2)) {
            {
--            org.apache.thrift.protocol.TList _list873 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
--            struct.part_vals = new ArrayList<String>(_list873.size);
--            String _elem874;
--            for (int _i875 = 0; _i875 < _list873.size; ++_i875)
++            org.apache.thrift.protocol.TList _list881 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
++            struct.part_vals = new ArrayList<String>(_list881.size);
++            String _elem882;
++            for (int _i883 = 0; _i883 < _list881.size; ++_i883)
              {
--              _elem874 = iprot.readString();
--              struct.part_vals.add(_elem874);
++              _elem882 = iprot.readString();
++              struct.part_vals.add(_elem882);
              }
            }
            struct.setPart_valsIsSet(true);
@@@ -80094,15 -80690,15 +81218,15 @@@
              case 1: // PARTITION_SPECS
                if (schemeField.type == org.apache.thrift.protocol.TType.MAP) {
                  {
--                  org.apache.thrift.protocol.TMap _map876 = iprot.readMapBegin();
--                  struct.partitionSpecs = new HashMap<String,String>(2*_map876.size);
--                  String _key877;
--                  String _val878;
--                  for (int _i879 = 0; _i879 < _map876.size; ++_i879)
++                  org.apache.thrift.protocol.TMap _map884 = iprot.readMapBegin();
++                  struct.partitionSpecs = new HashMap<String,String>(2*_map884.size);
++                  String _key885;
++                  String _val886;
++                  for (int _i887 = 0; _i887 < _map884.size; ++_i887)
                    {
--                    _key877 = iprot.readString();
--                    _val878 = iprot.readString();
--                    struct.partitionSpecs.put(_key877, _val878);
++                    _key885 = iprot.readString();
++                    _val886 = iprot.readString();
++                    struct.partitionSpecs.put(_key885, _val886);
                    }
                    iprot.readMapEnd();
                  }
@@@ -80160,10 -80756,10 +81284,10 @@@
            oprot.writeFieldBegin(PARTITION_SPECS_FIELD_DESC);
            {
              oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.partitionSpecs.size()));
--            for (Map.Entry<String, String> _iter880 : struct.partitionSpecs.entrySet())
++            for (Map.Entry<String, String> _iter888 : struct.partitionSpecs.entrySet())
              {
--              oprot.writeString(_iter880.getKey());
--              oprot.writeString(_iter880.getValue());
++              oprot.writeString(_iter888.getKey());
++              oprot.writeString(_iter888.getValue());
              }
              oprot.writeMapEnd();
            }
@@@ -80226,10 -80822,10 +81350,10 @@@
          if (struct.isSetPartitionSpecs()) {
            {
              oprot.writeI32(struct.partitionSpecs.size());
--            for (Map.Entry<String, String> _iter881 : struct.partitionSpecs.entrySet())
++            for (Map.Entry<String, String> _iter889 : struct.partitionSpecs.entrySet())
              {
--              oprot.writeString(_iter881.getKey());
--              oprot.writeString(_iter881.getValue());
++              oprot.writeString(_iter889.getKey());
++              oprot.writeString(_iter889.getValue());
              }
            }
          }
@@@ -80253,15 -80849,15 +81377,15 @@@
          BitSet incoming = iprot.readBitSet(5);
          if (incoming.get(0)) {
            {
--            org.apache.thrift.protocol.TMap _map882 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32());
--            struct.partitionSpecs = new HashMap<String,String>(2*_map882.size);
--            String _key883;
--            String _val884;
--            for (int _i885 = 0; _i885 < _map882.size; ++_i885)
++            org.apache.thrift.protocol.TMap _map890 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32());
++            struct.partitionSpecs = new HashMap<String,String>(2*_map890.size);
++            String _key891;
++            String _val892;
++            for (int _i893 = 0; _i893 < _map890.size; ++_i893)
              {
--              _key883 = iprot.readString();
--              _val884 = iprot.readString();
--              struct.partitionSpecs.put(_key883, _val884);
++              _key891 = iprot.readString();
++              _val892 = iprot.readString();
++              struct.partitionSpecs.put(_key891, _val892);
              }
            }
            struct.setPartitionSpecsIsSet(true);
@@@ -81707,15 -82303,15 +82831,15 @@@
              case 1: // PARTITION_SPECS
                if (schemeField.type == org.apache.thrift.protocol.TType.MAP) {
                  {
--                  org.apache.thrift.protocol.TMap _map886 = iprot.readMapBegin();
--                  struct.partitionSpecs = new HashMap<String,String>(2*_map886.size);
--                  String _key887;
--                  String _val888;
--                  for (int _i889 = 0; _i889 < _map886.size; ++_i889)
++                  org.apache.thrift.protocol.TMap _map894 = iprot.readMapBegin();
++                  struct.partitionSpecs = new HashMap<String,String>(2*_map894.size);
++                  String _key895;
++                  String _val896;
++                  for (int _i897 = 0; _i897 < _map894.size; ++_i897)
                    {
--                    _key887 = iprot.readString();
--                    _val888 = iprot.readString();
--                    struct.partitionSpecs.put(_key887, _val888);
++                    _key895 = iprot.readString();
++                    _val896 = iprot.readString();
++                    struct.partitionSpecs.put(_key895, _val896);
                    }
                    iprot.readMapEnd();
                  }
@@@ -81773,10 -82369,10 +82897,10 @@@
            oprot.writeFieldBegin(PARTITION_SPECS_FIELD_DESC);
            {
              oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.partitionSpecs.size()));
--            for (Map.Entry<String, String> _iter890 : struct.partitionSpecs.entrySet())
++            for (Map.Entry<String, String> _iter898 : struct.partitionSpecs.entrySet())
              {
--              oprot.writeString(_iter890.getKey());
--              oprot.writeString(_iter890.getValue());
++              oprot.writeString(_iter898.getKey());
++              oprot.writeString(_iter898.getValue());
              }
              oprot.writeMapEnd();
            }
@@@ -81839,10 -82435,10 +82963,10 @@@
          if (struct.isSetPartitionSpecs()) {
            {
              oprot.writeI32(struct.partitionSpecs.size());
--            for (Map.Entry<String, String> _iter891 : struct.partitionSpecs.entrySet())
++            for (Map.Entry<String, String> _iter899 : struct.partitionSpecs.entrySet())
              {
--              oprot.writeString(_iter891.getKey());
--              oprot.writeString(_iter891.getValue());
++              oprot.writeString(_iter899.getKey());
++              oprot.writeString(_iter899.getValue());
              }
            }
          }
@@@ -81866,15 -82462,15 +82990,15 @@@
          BitSet incoming = iprot.readBitSet(5);
          if (incoming.get(0)) {
            {
--            org.apache.thrift.protocol.TMap _map892 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32());
--            struct.partitionSpecs = new HashMap<String,String>(2*_map892.size);
--            String _key893

<TRUNCATED>

[26/50] [abbrv] hive git commit: HIVE-16275: Vectorization: Add ReduceSink support for TopN (in specialized native classes) (Matt McCline, reviewed by Gopal Vijayaraghavan)

Posted by we...@apache.org.
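
The q.out updates in this commit all follow the same pattern: a ReduceSink carrying a
TopN (LIMIT pushdown) hint used to be disqualified from the native vectorized operators
("nativeConditionsNotMet: No TopN IS false") and fell back to the non-native
VectorReduceSinkOperator; after this change only a TopN belonging to a PTF disqualifies
it, so those sinks now report the specialized native classes
(VectorReduceSinkObjectHashOperator, VectorReduceSinkStringOperator,
VectorReduceSinkMultiKeyOperator) and the condition string becomes "No PTF TopN IS true".
A minimal sketch of the relaxed check, using hypothetical names purely for illustration
(this is not the actual Vectorizer code):

    // Hypothetical illustration of the relaxed native-ReduceSink condition;
    // not the actual Hive Vectorizer code.
    class NativeReduceSinkCheckSketch {
      // Before: any TopN blocked the native operators ("No TopN").
      // After:  only a PTF TopN blocks them ("No PTF TopN"); plain LIMIT
      //         pushdown TopN may use the specialized native classes.
      static boolean eligible(boolean hasTopN, boolean topNIsForPtf,
                              boolean hasDistinctColumns,
                              boolean binarySortableKeys,
                              boolean lazyBinaryValues) {
        boolean topNOk = !(hasTopN && topNIsForPtf);  // was: !hasTopN
        return topNOk && !hasDistinctColumns
            && binarySortableKeys && lazyBinaryValues;
      }
    }
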
http://git-wip-us.apache.org/repos/asf/hive/blob/ec8c390e/ql/src/test/results/clientpositive/llap/vector_ptf_part_simple.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_ptf_part_simple.q.out b/ql/src/test/results/clientpositive/llap/vector_ptf_part_simple.q.out
index 9929550..dbf90b0 100644
--- a/ql/src/test/results/clientpositive/llap/vector_ptf_part_simple.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_ptf_part_simple.q.out
@@ -143,7 +143,7 @@ STAGE PLANS:
                     Reduce Sink Vectorization:
                         className: VectorReduceSinkStringOperator
                         native: true
-                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                     Statistics: Num rows: 40 Data size: 9048 Basic stats: COMPLETE Column stats: NONE
                     value expressions: p_name (type: string), p_retailprice (type: double)
             Execution mode: vectorized, llap
@@ -368,7 +368,7 @@ STAGE PLANS:
                     Reduce Sink Vectorization:
                         className: VectorReduceSinkObjectHashOperator
                         native: true
-                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                     Statistics: Num rows: 40 Data size: 9048 Basic stats: COMPLETE Column stats: NONE
                     value expressions: p_retailprice (type: double)
             Execution mode: vectorized, llap
@@ -593,7 +593,7 @@ STAGE PLANS:
                     Reduce Sink Vectorization:
                         className: VectorReduceSinkObjectHashOperator
                         native: true
-                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                     Statistics: Num rows: 40 Data size: 9048 Basic stats: COMPLETE Column stats: NONE
                     value expressions: p_retailprice (type: double)
             Execution mode: vectorized, llap
@@ -812,7 +812,7 @@ STAGE PLANS:
                     Reduce Sink Vectorization:
                         className: VectorReduceSinkStringOperator
                         native: true
-                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                     Statistics: Num rows: 40 Data size: 9048 Basic stats: COMPLETE Column stats: NONE
                     value expressions: p_name (type: string), p_retailprice (type: double)
             Execution mode: vectorized, llap
@@ -1005,7 +1005,7 @@ STAGE PLANS:
                     Reduce Sink Vectorization:
                         className: VectorReduceSinkObjectHashOperator
                         native: true
-                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                     Statistics: Num rows: 40 Data size: 9048 Basic stats: COMPLETE Column stats: NONE
                     value expressions: p_retailprice (type: double)
             Execution mode: vectorized, llap
@@ -1198,7 +1198,7 @@ STAGE PLANS:
                     Reduce Sink Vectorization:
                         className: VectorReduceSinkObjectHashOperator
                         native: true
-                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                     Statistics: Num rows: 40 Data size: 9048 Basic stats: COMPLETE Column stats: NONE
                     value expressions: p_retailprice (type: double)
             Execution mode: vectorized, llap
@@ -1391,7 +1391,7 @@ STAGE PLANS:
                     Reduce Sink Vectorization:
                         className: VectorReduceSinkObjectHashOperator
                         native: true
-                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                     Statistics: Num rows: 40 Data size: 9048 Basic stats: COMPLETE Column stats: NONE
                     value expressions: p_retailprice (type: double)
             Execution mode: vectorized, llap
@@ -1626,7 +1626,7 @@ STAGE PLANS:
                     Reduce Sink Vectorization:
                         className: VectorReduceSinkStringOperator
                         native: true
-                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                     Statistics: Num rows: 40 Data size: 12792 Basic stats: COMPLETE Column stats: NONE
                     value expressions: p_name (type: string), p_retailprice (type: decimal(38,18))
             Execution mode: vectorized, llap
@@ -1819,7 +1819,7 @@ STAGE PLANS:
                     Reduce Sink Vectorization:
                         className: VectorReduceSinkObjectHashOperator
                         native: true
-                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                     Statistics: Num rows: 40 Data size: 12792 Basic stats: COMPLETE Column stats: NONE
                     value expressions: p_retailprice (type: decimal(38,18))
             Execution mode: vectorized, llap
@@ -2032,7 +2032,7 @@ STAGE PLANS:
                     Reduce Sink Vectorization:
                         className: VectorReduceSinkStringOperator
                         native: true
-                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                     Statistics: Num rows: 40 Data size: 9048 Basic stats: COMPLETE Column stats: NONE
                     value expressions: p_name (type: string), p_bigint (type: bigint)
             Execution mode: vectorized, llap
@@ -2225,7 +2225,7 @@ STAGE PLANS:
                     Reduce Sink Vectorization:
                         className: VectorReduceSinkObjectHashOperator
                         native: true
-                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                     Statistics: Num rows: 40 Data size: 9048 Basic stats: COMPLETE Column stats: NONE
                     value expressions: p_bigint (type: bigint)
             Execution mode: vectorized, llap
@@ -2412,7 +2412,7 @@ STAGE PLANS:
                     Reduce Sink Vectorization:
                         className: VectorReduceSinkStringOperator
                         native: true
-                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                     Statistics: Num rows: 40 Data size: 9048 Basic stats: COMPLETE Column stats: NONE
                     value expressions: p_retailprice (type: double)
             Execution mode: vectorized, llap
@@ -2576,7 +2576,7 @@ STAGE PLANS:
                     Reduce Sink Vectorization:
                         className: VectorReduceSinkObjectHashOperator
                         native: true
-                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                     Statistics: Num rows: 40 Data size: 9048 Basic stats: COMPLETE Column stats: NONE
                     value expressions: p_retailprice (type: double)
             Execution mode: vectorized, llap
@@ -2741,7 +2741,7 @@ STAGE PLANS:
                         className: VectorReduceSinkMultiKeyOperator
                         keyExpressions: VectorUDFAdaptor(CASE WHEN ((p_mfgr = 'Manufacturer#2')) THEN (2000-01-01 00:00:00.0) ELSE (null) END)(children: StringGroupColEqualStringScalar(col 0, val Manufacturer#2) -> 3:boolean) -> 4:timestamp
                         native: true
-                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                     Statistics: Num rows: 40 Data size: 9048 Basic stats: COMPLETE Column stats: NONE
                     value expressions: p_name (type: string), p_retailprice (type: double)
             Execution mode: vectorized, llap
@@ -2907,7 +2907,7 @@ STAGE PLANS:
                         className: VectorReduceSinkObjectHashOperator
                         keyExpressions: VectorUDFAdaptor(CASE WHEN ((p_mfgr = 'Manufacturer#2')) THEN (2000-01-01 00:00:00.0) ELSE (null) END)(children: StringGroupColEqualStringScalar(col 0, val Manufacturer#2) -> 3:boolean) -> 4:timestamp
                         native: true
-                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                     Statistics: Num rows: 40 Data size: 9048 Basic stats: COMPLETE Column stats: NONE
                     value expressions: p_retailprice (type: double)
             Execution mode: vectorized, llap

http://git-wip-us.apache.org/repos/asf/hive/blob/ec8c390e/ql/src/test/results/clientpositive/llap/vector_reduce1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_reduce1.q.out b/ql/src/test/results/clientpositive/llap/vector_reduce1.q.out
index 794e595..4a4e710 100644
--- a/ql/src/test/results/clientpositive/llap/vector_reduce1.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_reduce1.q.out
@@ -145,7 +145,7 @@ STAGE PLANS:
                       Reduce Sink Vectorization:
                           className: VectorReduceSinkObjectHashOperator
                           native: true
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                       Statistics: Num rows: 2000 Data size: 918712 Basic stats: COMPLETE Column stats: NONE
             Execution mode: vectorized, llap
             LLAP IO: all inputs

http://git-wip-us.apache.org/repos/asf/hive/blob/ec8c390e/ql/src/test/results/clientpositive/llap/vector_reduce2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_reduce2.q.out b/ql/src/test/results/clientpositive/llap/vector_reduce2.q.out
index 170ea9c..d937146 100644
--- a/ql/src/test/results/clientpositive/llap/vector_reduce2.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_reduce2.q.out
@@ -145,7 +145,7 @@ STAGE PLANS:
                       Reduce Sink Vectorization:
                           className: VectorReduceSinkObjectHashOperator
                           native: true
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                       Statistics: Num rows: 2000 Data size: 918712 Basic stats: COMPLETE Column stats: NONE
             Execution mode: vectorized, llap
             LLAP IO: all inputs

http://git-wip-us.apache.org/repos/asf/hive/blob/ec8c390e/ql/src/test/results/clientpositive/llap/vector_reduce3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_reduce3.q.out b/ql/src/test/results/clientpositive/llap/vector_reduce3.q.out
index 3d2992f..382841f 100644
--- a/ql/src/test/results/clientpositive/llap/vector_reduce3.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_reduce3.q.out
@@ -145,7 +145,7 @@ STAGE PLANS:
                       Reduce Sink Vectorization:
                           className: VectorReduceSinkObjectHashOperator
                           native: true
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                       Statistics: Num rows: 2000 Data size: 918712 Basic stats: COMPLETE Column stats: NONE
             Execution mode: vectorized, llap
             LLAP IO: all inputs

http://git-wip-us.apache.org/repos/asf/hive/blob/ec8c390e/ql/src/test/results/clientpositive/llap/vector_reduce_groupby_decimal.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_reduce_groupby_decimal.q.out b/ql/src/test/results/clientpositive/llap/vector_reduce_groupby_decimal.q.out
index 964e63d..7348af8 100644
--- a/ql/src/test/results/clientpositive/llap/vector_reduce_groupby_decimal.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_reduce_groupby_decimal.q.out
@@ -78,10 +78,9 @@ STAGE PLANS:
                         sort order: ++++
                         Map-reduce partition columns: _col0 (type: int), _col1 (type: double), _col2 (type: decimal(20,10)), _col3 (type: decimal(23,14))
                         Reduce Sink Vectorization:
-                            className: VectorReduceSinkOperator
-                            native: false
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                            nativeConditionsNotMet: No TopN IS false
+                            className: VectorReduceSinkMultiKeyOperator
+                            native: true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 6102 Data size: 1440072 Basic stats: COMPLETE Column stats: NONE
                         TopN Hash Memory Usage: 0.1
                         value expressions: _col4 (type: decimal(20,10))
@@ -122,10 +121,9 @@ STAGE PLANS:
                   key expressions: _col0 (type: int), _col1 (type: double), _col2 (type: decimal(20,10)), _col3 (type: decimal(23,14))
                   sort order: ++++
                   Reduce Sink Vectorization:
-                      className: VectorReduceSinkOperator
-                      native: false
-                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                      nativeConditionsNotMet: No TopN IS false
+                      className: VectorReduceSinkObjectHashOperator
+                      native: true
+                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                   Statistics: Num rows: 3051 Data size: 720036 Basic stats: COMPLETE Column stats: NONE
                   TopN Hash Memory Usage: 0.1
                   value expressions: _col4 (type: decimal(20,10))

http://git-wip-us.apache.org/repos/asf/hive/blob/ec8c390e/ql/src/test/results/clientpositive/llap/vector_string_concat.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_string_concat.q.out b/ql/src/test/results/clientpositive/llap/vector_string_concat.q.out
index 8b19c58..fb9e121 100644
--- a/ql/src/test/results/clientpositive/llap/vector_string_concat.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_string_concat.q.out
@@ -369,10 +369,9 @@ STAGE PLANS:
                         sort order: +
                         Map-reduce partition columns: _col0 (type: string)
                         Reduce Sink Vectorization:
-                            className: VectorReduceSinkOperator
-                            native: false
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                            nativeConditionsNotMet: No TopN IS false
+                            className: VectorReduceSinkStringOperator
+                            native: true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 2000 Data size: 918712 Basic stats: COMPLETE Column stats: NONE
                         TopN Hash Memory Usage: 0.1
             Execution mode: vectorized, llap
@@ -410,10 +409,9 @@ STAGE PLANS:
                   key expressions: _col0 (type: string)
                   sort order: +
                   Reduce Sink Vectorization:
-                      className: VectorReduceSinkOperator
-                      native: false
-                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                      nativeConditionsNotMet: No TopN IS false
+                      className: VectorReduceSinkObjectHashOperator
+                      native: true
+                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                   Statistics: Num rows: 1000 Data size: 459356 Basic stats: COMPLETE Column stats: NONE
                   TopN Hash Memory Usage: 0.1
         Reducer 3 

http://git-wip-us.apache.org/repos/asf/hive/blob/ec8c390e/ql/src/test/results/clientpositive/llap/vector_string_decimal.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_string_decimal.q.out b/ql/src/test/results/clientpositive/llap/vector_string_decimal.q.out
index c036d69..6b4ca6c 100644
--- a/ql/src/test/results/clientpositive/llap/vector_string_decimal.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_string_decimal.q.out
@@ -62,28 +62,44 @@ STAGE PLANS:
                 TableScan
                   alias: orc_decimal
                   Statistics: Num rows: 4 Data size: 448 Basic stats: COMPLETE Column stats: NONE
+                  TableScan Vectorization:
+                      native: true
+                      projectedOutputColumns: [0]
                   Filter Operator
-                    predicate: (id) IN ('100000000', '200000000') (type: boolean)
+                    Filter Vectorization:
+                        className: VectorFilterOperator
+                        native: true
+                        predicateExpression: FilterDoubleColumnInList(col 1, values [1.0E8, 2.0E8])(children: CastDecimalToDouble(col 0) -> 1:double) -> boolean
+                    predicate: (UDFToDouble(id)) IN (1.0E8, 2.0E8) (type: boolean)
                     Statistics: Num rows: 2 Data size: 224 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: id (type: decimal(18,0))
                       outputColumnNames: _col0
+                      Select Vectorization:
+                          className: VectorSelectOperator
+                          native: true
+                          projectedOutputColumns: [0]
                       Statistics: Num rows: 2 Data size: 224 Basic stats: COMPLETE Column stats: NONE
                       File Output Operator
                         compressed: false
+                        File Sink Vectorization:
+                            className: VectorFileSinkOperator
+                            native: false
                         Statistics: Num rows: 2 Data size: 224 Basic stats: COMPLETE Column stats: NONE
                         table:
                             input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                             output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
                             serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            Execution mode: llap
+            Execution mode: vectorized, llap
             LLAP IO: all inputs
             Map Vectorization:
                 enabled: true
                 enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
+                groupByVectorOutput: true
                 inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                notVectorizedReason: Predicate expression for FILTER operator: Cannot vectorize IN() - casting a column is not supported. Column type is decimal(18,0) but the common type is string
-                vectorized: false
+                allNative: false
+                usesVectorUDFAdaptor: false
+                vectorized: true
 
   Stage: Stage-0
     Fetch Operator

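Note that the vector_string_decimal.q.out hunk above is more than the condition-string
rename: the IN() filter over the decimal column, which previously could not be
vectorized ("Cannot vectorize IN() - casting a column is not supported"), is now
evaluated as a double-column in-list filter (FilterDoubleColumnInList over
CastDecimalToDouble). A rough sketch of how a vectorized in-list filter narrows a
batch's selected-row vector, with hypothetical names and no claim to match Hive's
actual implementation:

    // Illustration only: a vectorized in-list filter narrows the batch's
    // selected-row index array to the rows whose value is in the list.
    class DoubleInListFilterSketch {
      static int filter(double[] col, int[] selected, int size,
                        java.util.Set<Double> inList) {
        int newSize = 0;
        for (int i = 0; i < size; i++) {
          int row = selected[i];
          if (inList.contains(col[row])) {
            selected[newSize++] = row;  // keep this row
          }
        }
        return newSize;                 // new selected-row count
      }
    }
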
http://git-wip-us.apache.org/repos/asf/hive/blob/ec8c390e/ql/src/test/results/clientpositive/llap/vector_tablesample_rows.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_tablesample_rows.q.out b/ql/src/test/results/clientpositive/llap/vector_tablesample_rows.q.out
index 31b834a..3a431b6 100644
--- a/ql/src/test/results/clientpositive/llap/vector_tablesample_rows.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_tablesample_rows.q.out
@@ -264,7 +264,7 @@ STAGE PLANS:
                     Reduce Sink Vectorization:
                         className: VectorReduceSinkObjectHashOperator
                         native: true
-                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                     Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
                     value expressions: _col0 (type: bigint)
         Reducer 3 

http://git-wip-us.apache.org/repos/asf/hive/blob/ec8c390e/ql/src/test/results/clientpositive/llap/vector_varchar_simple.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_varchar_simple.q.out b/ql/src/test/results/clientpositive/llap/vector_varchar_simple.q.out
index b219869..52f78da 100644
--- a/ql/src/test/results/clientpositive/llap/vector_varchar_simple.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_varchar_simple.q.out
@@ -93,7 +93,7 @@ STAGE PLANS:
                 enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
                 groupByVectorOutput: true
                 inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
+                allNative: true
                 usesVectorUDFAdaptor: false
                 vectorized: true
         Reducer 2 
@@ -213,7 +213,7 @@ STAGE PLANS:
                 enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
                 groupByVectorOutput: true
                 inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
+                allNative: true
                 usesVectorUDFAdaptor: false
                 vectorized: true
         Reducer 2 
@@ -335,10 +335,9 @@ STAGE PLANS:
                       Reduce Output Operator
                         sort order: 
                         Reduce Sink Vectorization:
-                            className: VectorReduceSinkOperator
-                            native: false
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                            nativeConditionsNotMet: No TopN IS false
+                            className: VectorReduceSinkObjectHashOperator
+                            native: true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 10 Data size: 32 Basic stats: COMPLETE Column stats: COMPLETE
                         TopN Hash Memory Usage: 0.1
                         value expressions: _col0 (type: int)
@@ -349,7 +348,7 @@ STAGE PLANS:
                 enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
                 groupByVectorOutput: true
                 inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
+                allNative: true
                 usesVectorUDFAdaptor: false
                 vectorized: true
         Reducer 2 

http://git-wip-us.apache.org/repos/asf/hive/blob/ec8c390e/ql/src/test/results/clientpositive/llap/vector_when_case_null.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_when_case_null.q.out b/ql/src/test/results/clientpositive/llap/vector_when_case_null.q.out
index 8104f3e..06dde80 100644
--- a/ql/src/test/results/clientpositive/llap/vector_when_case_null.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_when_case_null.q.out
@@ -73,7 +73,7 @@ STAGE PLANS:
                         Reduce Sink Vectorization:
                             className: VectorReduceSinkStringOperator
                             native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 5 Data size: 452 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col1 (type: bigint)
             Execution mode: vectorized, llap

http://git-wip-us.apache.org/repos/asf/hive/blob/ec8c390e/ql/src/test/results/clientpositive/llap/vectorization_0.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vectorization_0.q.out b/ql/src/test/results/clientpositive/llap/vectorization_0.q.out
index d2897ba..73fb9b1 100644
--- a/ql/src/test/results/clientpositive/llap/vectorization_0.q.out
+++ b/ql/src/test/results/clientpositive/llap/vectorization_0.q.out
@@ -63,7 +63,7 @@ STAGE PLANS:
                         Reduce Sink Vectorization:
                             className: VectorReduceSinkObjectHashOperator
                             native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 1 Data size: 24 Basic stats: COMPLETE Column stats: COMPLETE
                         value expressions: _col0 (type: tinyint), _col1 (type: tinyint), _col2 (type: bigint), _col3 (type: bigint)
             Execution mode: vectorized, llap
@@ -103,7 +103,7 @@ STAGE PLANS:
                   Reduce Sink Vectorization:
                       className: VectorReduceSinkObjectHashOperator
                       native: true
-                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                   Statistics: Num rows: 1 Data size: 24 Basic stats: COMPLETE Column stats: COMPLETE
                   value expressions: _col1 (type: tinyint), _col2 (type: bigint), _col3 (type: bigint)
         Reducer 3 
@@ -219,7 +219,7 @@ STAGE PLANS:
                         Reduce Sink Vectorization:
                             className: VectorReduceSinkObjectHashOperator
                             native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
                         value expressions: _col0 (type: bigint)
             Execution mode: vectorized, llap
@@ -259,7 +259,7 @@ STAGE PLANS:
                   Reduce Sink Vectorization:
                       className: VectorReduceSinkObjectHashOperator
                       native: true
-                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                   Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
         Reducer 3 
             Execution mode: vectorized, llap
@@ -520,7 +520,7 @@ STAGE PLANS:
                         Reduce Sink Vectorization:
                             className: VectorReduceSinkObjectHashOperator
                             native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 1 Data size: 32 Basic stats: COMPLETE Column stats: COMPLETE
                         value expressions: _col0 (type: bigint), _col1 (type: bigint), _col2 (type: bigint), _col3 (type: bigint)
             Execution mode: vectorized, llap
@@ -560,7 +560,7 @@ STAGE PLANS:
                   Reduce Sink Vectorization:
                       className: VectorReduceSinkObjectHashOperator
                       native: true
-                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                   Statistics: Num rows: 1 Data size: 32 Basic stats: COMPLETE Column stats: COMPLETE
                   value expressions: _col1 (type: bigint), _col2 (type: bigint), _col3 (type: bigint)
         Reducer 3 
@@ -676,7 +676,7 @@ STAGE PLANS:
                         Reduce Sink Vectorization:
                             className: VectorReduceSinkObjectHashOperator
                             native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
                         value expressions: _col0 (type: bigint)
             Execution mode: vectorized, llap
@@ -716,7 +716,7 @@ STAGE PLANS:
                   Reduce Sink Vectorization:
                       className: VectorReduceSinkObjectHashOperator
                       native: true
-                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                   Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
         Reducer 3 
             Execution mode: vectorized, llap
@@ -977,7 +977,7 @@ STAGE PLANS:
                         Reduce Sink Vectorization:
                             className: VectorReduceSinkObjectHashOperator
                             native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 1 Data size: 24 Basic stats: COMPLETE Column stats: COMPLETE
                         value expressions: _col0 (type: float), _col1 (type: float), _col2 (type: bigint), _col3 (type: bigint)
             Execution mode: vectorized, llap
@@ -1017,7 +1017,7 @@ STAGE PLANS:
                   Reduce Sink Vectorization:
                       className: VectorReduceSinkObjectHashOperator
                       native: true
-                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                   Statistics: Num rows: 1 Data size: 24 Basic stats: COMPLETE Column stats: COMPLETE
                   value expressions: _col1 (type: float), _col2 (type: bigint), _col3 (type: bigint)
         Reducer 3 
@@ -1133,7 +1133,7 @@ STAGE PLANS:
                         Reduce Sink Vectorization:
                             className: VectorReduceSinkObjectHashOperator
                             native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
                         value expressions: _col0 (type: double)
             Execution mode: vectorized, llap
@@ -1173,7 +1173,7 @@ STAGE PLANS:
                   Reduce Sink Vectorization:
                       className: VectorReduceSinkObjectHashOperator
                       native: true
-                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                   Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
         Reducer 3 
             Execution mode: vectorized, llap

http://git-wip-us.apache.org/repos/asf/hive/blob/ec8c390e/ql/src/test/results/clientpositive/llap/vectorization_7.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vectorization_7.q.out b/ql/src/test/results/clientpositive/llap/vectorization_7.q.out
index 9925959..ba49bed 100644
--- a/ql/src/test/results/clientpositive/llap/vectorization_7.q.out
+++ b/ql/src/test/results/clientpositive/llap/vectorization_7.q.out
@@ -96,10 +96,9 @@ STAGE PLANS:
                         key expressions: _col0 (type: boolean), _col1 (type: bigint), _col2 (type: smallint), _col3 (type: tinyint), _col4 (type: timestamp), _col5 (type: string), _col6 (type: bigint), _col7 (type: int), _col8 (type: smallint), _col9 (type: tinyint), _col10 (type: int), _col11 (type: bigint), _col12 (type: int), _col13 (type: tinyint), _col14 (type: tinyint)
                         sort order: +++++++++++++++
                         Reduce Sink Vectorization:
-                            className: VectorReduceSinkOperator
-                            native: false
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                            nativeConditionsNotMet: No TopN IS false
+                            className: VectorReduceSinkObjectHashOperator
+                            native: true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 7281 Data size: 1231410 Basic stats: COMPLETE Column stats: COMPLETE
                         TopN Hash Memory Usage: 0.1
             Execution mode: vectorized, llap
@@ -109,7 +108,7 @@ STAGE PLANS:
                 enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
                 groupByVectorOutput: true
                 inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
+                allNative: true
                 usesVectorUDFAdaptor: false
                 vectorized: true
         Reducer 2 
@@ -332,10 +331,9 @@ STAGE PLANS:
                         key expressions: _col0 (type: boolean), _col1 (type: bigint), _col2 (type: smallint), _col3 (type: tinyint), _col4 (type: timestamp), _col5 (type: string), _col6 (type: bigint), _col7 (type: int), _col8 (type: smallint), _col9 (type: tinyint), _col10 (type: int), _col11 (type: bigint), _col12 (type: int), _col13 (type: tinyint), _col14 (type: tinyint)
                         sort order: +++++++++++++++
                         Reduce Sink Vectorization:
-                            className: VectorReduceSinkOperator
-                            native: false
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                            nativeConditionsNotMet: No TopN IS false
+                            className: VectorReduceSinkObjectHashOperator
+                            native: true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 7281 Data size: 1231410 Basic stats: COMPLETE Column stats: COMPLETE
                         TopN Hash Memory Usage: 0.1
             Execution mode: vectorized, llap
@@ -345,7 +343,7 @@ STAGE PLANS:
                 enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
                 groupByVectorOutput: true
                 inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
+                allNative: true
                 usesVectorUDFAdaptor: false
                 vectorized: true
         Reducer 2 

http://git-wip-us.apache.org/repos/asf/hive/blob/ec8c390e/ql/src/test/results/clientpositive/llap/vectorization_8.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vectorization_8.q.out b/ql/src/test/results/clientpositive/llap/vectorization_8.q.out
index 42f12e9..9e9f2c7 100644
--- a/ql/src/test/results/clientpositive/llap/vectorization_8.q.out
+++ b/ql/src/test/results/clientpositive/llap/vectorization_8.q.out
@@ -92,10 +92,9 @@ STAGE PLANS:
                         key expressions: _col0 (type: timestamp), _col1 (type: double), _col2 (type: boolean), _col3 (type: string), _col4 (type: float), _col5 (type: double), _col6 (type: double), _col7 (type: double), _col8 (type: float), _col9 (type: double), _col10 (type: double), _col11 (type: float), _col12 (type: float), _col13 (type: double)
                         sort order: ++++++++++++++
                         Reduce Sink Vectorization:
-                            className: VectorReduceSinkOperator
-                            native: false
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                            nativeConditionsNotMet: No TopN IS false
+                            className: VectorReduceSinkObjectHashOperator
+                            native: true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 3060 Data size: 557456 Basic stats: COMPLETE Column stats: COMPLETE
                         TopN Hash Memory Usage: 0.1
             Execution mode: vectorized, llap
@@ -105,7 +104,7 @@ STAGE PLANS:
                 enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
                 groupByVectorOutput: true
                 inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
+                allNative: true
                 usesVectorUDFAdaptor: false
                 vectorized: true
         Reducer 2 
@@ -315,10 +314,9 @@ STAGE PLANS:
                         key expressions: _col0 (type: timestamp), _col1 (type: double), _col2 (type: boolean), _col3 (type: string), _col4 (type: float), _col5 (type: double), _col6 (type: double), _col7 (type: double), _col8 (type: float), _col9 (type: double), _col10 (type: double), _col11 (type: float), _col12 (type: float), _col13 (type: double)
                         sort order: ++++++++++++++
                         Reduce Sink Vectorization:
-                            className: VectorReduceSinkOperator
-                            native: false
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                            nativeConditionsNotMet: No TopN IS false
+                            className: VectorReduceSinkObjectHashOperator
+                            native: true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 3060 Data size: 557456 Basic stats: COMPLETE Column stats: COMPLETE
                         TopN Hash Memory Usage: 0.1
             Execution mode: vectorized, llap
@@ -328,7 +326,7 @@ STAGE PLANS:
                 enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
                 groupByVectorOutput: true
                 inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
+                allNative: true
                 usesVectorUDFAdaptor: false
                 vectorized: true
         Reducer 2 

http://git-wip-us.apache.org/repos/asf/hive/blob/ec8c390e/ql/src/test/results/clientpositive/llap/vectorization_div0.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vectorization_div0.q.out b/ql/src/test/results/clientpositive/llap/vectorization_div0.q.out
index 2b5e5a8..2ff5c54 100644
--- a/ql/src/test/results/clientpositive/llap/vectorization_div0.q.out
+++ b/ql/src/test/results/clientpositive/llap/vectorization_div0.q.out
@@ -227,10 +227,9 @@ STAGE PLANS:
                         key expressions: _col0 (type: bigint), _col1 (type: double)
                         sort order: ++
                         Reduce Sink Vectorization:
-                            className: VectorReduceSinkOperator
-                            native: false
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                            nativeConditionsNotMet: No TopN IS false
+                            className: VectorReduceSinkObjectHashOperator
+                            native: true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 1365 Data size: 174720 Basic stats: COMPLETE Column stats: COMPLETE
                         TopN Hash Memory Usage: 0.1
                         value expressions: _col2 (type: decimal(22,21))
@@ -241,7 +240,7 @@ STAGE PLANS:
                 enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
                 groupByVectorOutput: true
                 inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
+                allNative: true
                 usesVectorUDFAdaptor: false
                 vectorized: true
         Reducer 2 
@@ -447,10 +446,9 @@ STAGE PLANS:
                         key expressions: _col0 (type: double), _col1 (type: double)
                         sort order: ++
                         Reduce Sink Vectorization:
-                            className: VectorReduceSinkOperator
-                            native: false
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                            nativeConditionsNotMet: No TopN IS false
+                            className: VectorReduceSinkObjectHashOperator
+                            native: true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 1365 Data size: 65520 Basic stats: COMPLETE Column stats: COMPLETE
                         TopN Hash Memory Usage: 0.1
                         value expressions: _col2 (type: double), _col4 (type: double), _col5 (type: double)
@@ -461,7 +459,7 @@ STAGE PLANS:
                 enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
                 groupByVectorOutput: true
                 inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
+                allNative: true
                 usesVectorUDFAdaptor: false
                 vectorized: true
         Reducer 2 

http://git-wip-us.apache.org/repos/asf/hive/blob/ec8c390e/ql/src/test/results/clientpositive/llap/vectorization_limit.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vectorization_limit.q.out b/ql/src/test/results/clientpositive/llap/vectorization_limit.q.out
index c38a215..5701676 100644
--- a/ql/src/test/results/clientpositive/llap/vectorization_limit.q.out
+++ b/ql/src/test/results/clientpositive/llap/vectorization_limit.q.out
@@ -64,17 +64,17 @@ POSTHOOK: query: SELECT cbigint, cdouble FROM alltypesorc WHERE cbigint < cdoubl
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@alltypesorc
 #### A masked pattern was here ####
--1887561756	1839.0
 -1887561756	-10011.0
 -1887561756	-13877.0
--1887561756	10361.0
--1887561756	-8881.0
 -1887561756	-2281.0
+-1887561756	-8881.0
+-1887561756	10361.0
+-1887561756	1839.0
 -1887561756	9531.0
-PREHOOK: query: explain vectorization expression
+PREHOOK: query: explain vectorization detail
 select ctinyint,cdouble,csmallint from alltypesorc where ctinyint is not null order by ctinyint,cdouble limit 20
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization expression
+POSTHOOK: query: explain vectorization detail
 select ctinyint,cdouble,csmallint from alltypesorc where ctinyint is not null order by ctinyint,cdouble limit 20
 POSTHOOK: type: QUERY
 PLAN VECTORIZATION:
@@ -120,10 +120,9 @@ STAGE PLANS:
                         key expressions: _col0 (type: tinyint), _col1 (type: double)
                         sort order: ++
                         Reduce Sink Vectorization:
-                            className: VectorReduceSinkOperator
-                            native: false
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                            nativeConditionsNotMet: No TopN IS false
+                            className: VectorReduceSinkObjectHashOperator
+                            native: true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 9173 Data size: 109584 Basic stats: COMPLETE Column stats: COMPLETE
                         TopN Hash Memory Usage: 0.3
                         value expressions: _col2 (type: smallint)
@@ -134,18 +133,29 @@ STAGE PLANS:
                 enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
                 groupByVectorOutput: true
                 inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
+                allNative: true
                 usesVectorUDFAdaptor: false
                 vectorized: true
+                rowBatchContext:
+                    dataColumnCount: 12
+                    includeColumns: [0, 1, 5]
+                    dataColumns: ctinyint:tinyint, csmallint:smallint, cint:int, cbigint:bigint, cfloat:float, cdouble:double, cstring1:string, cstring2:string, ctimestamp1:timestamp, ctimestamp2:timestamp, cboolean1:boolean, cboolean2:boolean
+                    partitionColumnCount: 0
         Reducer 2 
             Execution mode: vectorized, llap
             Reduce Vectorization:
                 enabled: true
                 enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
+                reduceColumnNullOrder: aa
+                reduceColumnSortOrder: ++
                 groupByVectorOutput: true
                 allNative: false
                 usesVectorUDFAdaptor: false
                 vectorized: true
+                rowBatchContext:
+                    dataColumnCount: 3
+                    dataColumns: KEY.reducesinkkey0:tinyint, KEY.reducesinkkey1:double, VALUE._col0:smallint
+                    partitionColumnCount: 0
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey0 (type: tinyint), KEY.reducesinkkey1 (type: double), VALUE._col0 (type: smallint)
@@ -186,10 +196,17 @@ POSTHOOK: query: select ctinyint,cdouble,csmallint from alltypesorc where ctinyi
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@alltypesorc
 #### A masked pattern was here ####
--64	-15920.0	-15920
 -64	-10462.0	-10462
--64	-9842.0	-9842
--64	-8080.0	-8080
+-64	-15920.0	-15920
+-64	-1600.0	-1600
+-64	-200.0	-200
+-64	-2919.0	-2919
+-64	-3097.0	-3097
+-64	-3586.0	-3586
+-64	-4018.0	-4018
+-64	-4040.0	-4040
+-64	-4803.0	-4803
+-64	-6907.0	-6907
 -64	-7196.0	-7196
 -64	-7196.0	-7196
 -64	-7196.0	-7196
@@ -197,19 +214,12 @@ POSTHOOK: Input: default@alltypesorc
 -64	-7196.0	-7196
 -64	-7196.0	-7196
 -64	-7196.0	-7196
--64	-6907.0	-6907
--64	-4803.0	-4803
--64	-4040.0	-4040
--64	-4018.0	-4018
--64	-3586.0	-3586
--64	-3097.0	-3097
--64	-2919.0	-2919
--64	-1600.0	-1600
--64	-200.0	-200
-PREHOOK: query: explain vectorization expression
+-64	-8080.0	-8080
+-64	-9842.0	-9842
+PREHOOK: query: explain vectorization detail
 select ctinyint,avg(cdouble + 1) from alltypesorc group by ctinyint order by ctinyint limit 20
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization expression
+POSTHOOK: query: explain vectorization detail
 select ctinyint,avg(cdouble + 1) from alltypesorc group by ctinyint order by ctinyint limit 20
 POSTHOOK: type: QUERY
 PLAN VECTORIZATION:
@@ -276,6 +286,12 @@ STAGE PLANS:
                 allNative: false
                 usesVectorUDFAdaptor: false
                 vectorized: true
+                rowBatchContext:
+                    dataColumnCount: 12
+                    includeColumns: [0, 5]
+                    dataColumns: ctinyint:tinyint, csmallint:smallint, cint:int, cbigint:bigint, cfloat:float, cdouble:double, cstring1:string, cstring2:string, ctimestamp1:timestamp, ctimestamp2:timestamp, cboolean1:boolean, cboolean2:boolean
+                    partitionColumnCount: 0
+                    scratchColumnTypeNames: double
         Reducer 2 
             Execution mode: llap
             Reduce Vectorization:
@@ -315,30 +331,30 @@ POSTHOOK: query: select ctinyint,avg(cdouble + 1) from alltypesorc group by ctin
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@alltypesorc
 #### A masked pattern was here ####
-NULL	9370.0945309795
--64	373.52941176470586
--63	2178.7272727272725
--62	245.69387755102042
--61	914.3404255319149
--60	1071.82
--59	318.27272727272725
--58	3483.2444444444445
--57	1867.0535714285713
--56	2595.818181818182
--55	2385.595744680851
--54	2712.7272727272725
--53	-532.7567567567568
--52	2810.705882352941
--51	-96.46341463414635
--50	-960.0192307692307
--49	768.7659574468086
--48	1672.909090909091
--47	-574.6428571428571
 -46	3033.55
-PREHOOK: query: explain vectorization expression
+-47	-574.6428571428571
+-48	1672.909090909091
+-49	768.7659574468086
+-50	-960.0192307692307
+-51	-96.46341463414635
+-52	2810.705882352941
+-53	-532.7567567567568
+-54	2712.7272727272725
+-55	2385.595744680851
+-56	2595.818181818182
+-57	1867.0535714285713
+-58	3483.2444444444445
+-59	318.27272727272725
+-60	1071.82
+-61	914.3404255319149
+-62	245.69387755102042
+-63	2178.7272727272725
+-64	373.52941176470586
+NULL	9370.0945309795
+PREHOOK: query: explain vectorization detail
 select distinct(ctinyint) from alltypesorc limit 20
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization expression
+POSTHOOK: query: explain vectorization detail
 select distinct(ctinyint) from alltypesorc limit 20
 POSTHOOK: type: QUERY
 PLAN VECTORIZATION:
@@ -389,10 +405,9 @@ STAGE PLANS:
                         sort order: +
                         Map-reduce partition columns: _col0 (type: tinyint)
                         Reduce Sink Vectorization:
-                            className: VectorReduceSinkOperator
-                            native: false
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                            nativeConditionsNotMet: No TopN IS false
+                            className: VectorReduceSinkLongOperator
+                            native: true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 95 Data size: 288 Basic stats: COMPLETE Column stats: COMPLETE
                         TopN Hash Memory Usage: 0.3
             Execution mode: vectorized, llap
@@ -405,15 +420,26 @@ STAGE PLANS:
                 allNative: false
                 usesVectorUDFAdaptor: false
                 vectorized: true
+                rowBatchContext:
+                    dataColumnCount: 12
+                    includeColumns: [0]
+                    dataColumns: ctinyint:tinyint, csmallint:smallint, cint:int, cbigint:bigint, cfloat:float, cdouble:double, cstring1:string, cstring2:string, ctimestamp1:timestamp, ctimestamp2:timestamp, cboolean1:boolean, cboolean2:boolean
+                    partitionColumnCount: 0
         Reducer 2 
             Execution mode: vectorized, llap
             Reduce Vectorization:
                 enabled: true
                 enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
+                reduceColumnNullOrder: a
+                reduceColumnSortOrder: +
                 groupByVectorOutput: true
                 allNative: false
                 usesVectorUDFAdaptor: false
                 vectorized: true
+                rowBatchContext:
+                    dataColumnCount: 1
+                    dataColumns: KEY._col0:tinyint
+                    partitionColumnCount: 0
             Reduce Operator Tree:
               Group By Operator
                 Group By Vectorization:
@@ -457,30 +483,30 @@ POSTHOOK: query: select distinct(ctinyint) from alltypesorc limit 20
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@alltypesorc
 #### A masked pattern was here ####
-NULL
--64
--63
--62
--61
--60
--59
--58
--57
--56
--55
--54
--53
--52
--51
--50
--49
--48
--47
 -46
-PREHOOK: query: explain vectorization expression
+-47
+-48
+-49
+-50
+-51
+-52
+-53
+-54
+-55
+-56
+-57
+-58
+-59
+-60
+-61
+-62
+-63
+-64
+NULL
+PREHOOK: query: explain vectorization detail
 select ctinyint, count(distinct(cdouble)) from alltypesorc group by ctinyint order by ctinyint limit 20
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization expression
+POSTHOOK: query: explain vectorization detail
 select ctinyint, count(distinct(cdouble)) from alltypesorc group by ctinyint order by ctinyint limit 20
 POSTHOOK: type: QUERY
 PLAN VECTORIZATION:
@@ -533,7 +559,7 @@ STAGE PLANS:
                         Reduce Sink Vectorization:
                             className: VectorReduceSinkObjectHashOperator
                             native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 6144 Data size: 55052 Basic stats: COMPLETE Column stats: COMPLETE
             Execution mode: vectorized, llap
             LLAP IO: all inputs
@@ -545,15 +571,26 @@ STAGE PLANS:
                 allNative: false
                 usesVectorUDFAdaptor: false
                 vectorized: true
+                rowBatchContext:
+                    dataColumnCount: 12
+                    includeColumns: [0, 5]
+                    dataColumns: ctinyint:tinyint, csmallint:smallint, cint:int, cbigint:bigint, cfloat:float, cdouble:double, cstring1:string, cstring2:string, ctimestamp1:timestamp, ctimestamp2:timestamp, cboolean1:boolean, cboolean2:boolean
+                    partitionColumnCount: 0
         Reducer 2 
             Execution mode: vectorized, llap
             Reduce Vectorization:
                 enabled: true
                 enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
+                reduceColumnNullOrder: aa
+                reduceColumnSortOrder: ++
                 groupByVectorOutput: true
                 allNative: false
                 usesVectorUDFAdaptor: false
                 vectorized: true
+                rowBatchContext:
+                    dataColumnCount: 2
+                    dataColumns: KEY._col0:tinyint, KEY._col1:double
+                    partitionColumnCount: 0
             Reduce Operator Tree:
               Group By Operator
                 Group By Vectorization:
@@ -610,30 +647,30 @@ POSTHOOK: query: select ctinyint, count(distinct(cdouble)) from alltypesorc grou
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@alltypesorc
 #### A masked pattern was here ####
-NULL	2932
--64	24
--63	19
--62	27
--61	25
--60	27
--59	31
--58	23
--57	35
--56	36
--55	29
--54	26
--53	22
--52	33
--51	21
--50	30
--49	26
--48	29
--47	22
 -46	24
-PREHOOK: query: explain vectorization expression
+-47	22
+-48	29
+-49	26
+-50	30
+-51	21
+-52	33
+-53	22
+-54	26
+-55	29
+-56	36
+-57	35
+-58	23
+-59	31
+-60	27
+-61	25
+-62	27
+-63	19
+-64	24
+NULL	2932
+PREHOOK: query: explain vectorization detail
 select ctinyint,cdouble from alltypesorc order by ctinyint limit 0
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization expression
+POSTHOOK: query: explain vectorization detail
 select ctinyint,cdouble from alltypesorc order by ctinyint limit 0
 POSTHOOK: type: QUERY
 PLAN VECTORIZATION:
@@ -658,10 +695,10 @@ POSTHOOK: query: select ctinyint,cdouble from alltypesorc order by ctinyint limi
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@alltypesorc
 #### A masked pattern was here ####
-PREHOOK: query: explain vectorization expression
+PREHOOK: query: explain vectorization detail
 select cdouble, sum(ctinyint) as sum from alltypesorc where ctinyint is not null group by cdouble order by sum, cdouble limit 20
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization expression
+POSTHOOK: query: explain vectorization detail
 select cdouble, sum(ctinyint) as sum from alltypesorc where ctinyint is not null group by cdouble order by sum, cdouble limit 20
 POSTHOOK: type: QUERY
 PLAN VECTORIZATION:
@@ -696,37 +733,29 @@ STAGE PLANS:
                         predicateExpression: SelectColumnIsNotNull(col 0) -> boolean
                     predicate: ctinyint is not null (type: boolean)
                     Statistics: Num rows: 9173 Data size: 82188 Basic stats: COMPLETE Column stats: COMPLETE
-                    Select Operator
-                      expressions: cdouble (type: double), ctinyint (type: tinyint)
-                      outputColumnNames: cdouble, ctinyint
-                      Select Vectorization:
-                          className: VectorSelectOperator
-                          native: true
-                          projectedOutputColumns: [5, 0]
-                      Statistics: Num rows: 9173 Data size: 82188 Basic stats: COMPLETE Column stats: COMPLETE
-                      Group By Operator
-                        aggregations: sum(ctinyint)
-                        Group By Vectorization:
-                            aggregators: VectorUDAFSumLong(col 0) -> bigint
-                            className: VectorGroupByOperator
-                            vectorOutput: true
-                            keyExpressions: col 5
-                            native: false
-                            projectedOutputColumns: [0]
-                        keys: cdouble (type: double)
-                        mode: hash
-                        outputColumnNames: _col0, _col1
+                    Group By Operator
+                      aggregations: sum(ctinyint)
+                      Group By Vectorization:
+                          aggregators: VectorUDAFSumLong(col 0) -> bigint
+                          className: VectorGroupByOperator
+                          vectorOutput: true
+                          keyExpressions: col 5
+                          native: false
+                          projectedOutputColumns: [0]
+                      keys: cdouble (type: double)
+                      mode: hash
+                      outputColumnNames: _col0, _col1
+                      Statistics: Num rows: 3185 Data size: 44512 Basic stats: COMPLETE Column stats: COMPLETE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: double)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: double)
+                        Reduce Sink Vectorization:
+                            className: VectorReduceSinkMultiKeyOperator
+                            native: true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 3185 Data size: 44512 Basic stats: COMPLETE Column stats: COMPLETE
-                        Reduce Output Operator
-                          key expressions: _col0 (type: double)
-                          sort order: +
-                          Map-reduce partition columns: _col0 (type: double)
-                          Reduce Sink Vectorization:
-                              className: VectorReduceSinkMultiKeyOperator
-                              native: true
-                              nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                          Statistics: Num rows: 3185 Data size: 44512 Basic stats: COMPLETE Column stats: COMPLETE
-                          value expressions: _col1 (type: bigint)
+                        value expressions: _col1 (type: bigint)
             Execution mode: vectorized, llap
             LLAP IO: all inputs
             Map Vectorization:
@@ -737,15 +766,26 @@ STAGE PLANS:
                 allNative: false
                 usesVectorUDFAdaptor: false
                 vectorized: true
+                rowBatchContext:
+                    dataColumnCount: 12
+                    includeColumns: [0, 5]
+                    dataColumns: ctinyint:tinyint, csmallint:smallint, cint:int, cbigint:bigint, cfloat:float, cdouble:double, cstring1:string, cstring2:string, ctimestamp1:timestamp, ctimestamp2:timestamp, cboolean1:boolean, cboolean2:boolean
+                    partitionColumnCount: 0
         Reducer 2 
             Execution mode: vectorized, llap
             Reduce Vectorization:
                 enabled: true
                 enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
+                reduceColumnNullOrder: a
+                reduceColumnSortOrder: +
                 groupByVectorOutput: true
                 allNative: false
                 usesVectorUDFAdaptor: false
                 vectorized: true
+                rowBatchContext:
+                    dataColumnCount: 2
+                    dataColumns: KEY._col0:double, VALUE._col0:bigint
+                    partitionColumnCount: 0
             Reduce Operator Tree:
               Group By Operator
                 aggregations: sum(VALUE._col0)
@@ -764,10 +804,9 @@ STAGE PLANS:
                   key expressions: _col1 (type: bigint), _col0 (type: double)
                   sort order: ++
                   Reduce Sink Vectorization:
-                      className: VectorReduceSinkOperator
-                      native: false
-                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                      nativeConditionsNotMet: No TopN IS false
+                      className: VectorReduceSinkObjectHashOperator
+                      native: true
+                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                   Statistics: Num rows: 3185 Data size: 44512 Basic stats: COMPLETE Column stats: COMPLETE
                   TopN Hash Memory Usage: 0.3
         Reducer 3 
@@ -775,10 +814,16 @@ STAGE PLANS:
             Reduce Vectorization:
                 enabled: true
                 enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
+                reduceColumnNullOrder: aa
+                reduceColumnSortOrder: ++
                 groupByVectorOutput: true
                 allNative: false
                 usesVectorUDFAdaptor: false
                 vectorized: true
+                rowBatchContext:
+                    dataColumnCount: 2
+                    dataColumns: KEY.reducesinkkey0:bigint, KEY.reducesinkkey1:double
+                    partitionColumnCount: 0
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey1 (type: double), KEY.reducesinkkey0 (type: bigint)
@@ -819,23 +864,23 @@ POSTHOOK: query: select cdouble, sum(ctinyint) as sum from alltypesorc where cti
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@alltypesorc
 #### A masked pattern was here ####
-NULL	-32768
+-10462.0	-64
+-1121.0	-89
+-11322.0	-101
+-11492.0	-78
+-15920.0	-64
+-4803.0	-64
+-6907.0	-64
 -7196.0	-2009
+-8080.0	-64
+-8118.0	-80
+-9842.0	-64
+10496.0	-67
 15601.0	-1733
-4811.0	-115
--11322.0	-101
--1121.0	-89
-7705.0	-88
 3520.0	-86
--8118.0	-80
+4811.0	-115
 5241.0	-80
--11492.0	-78
-9452.0	-76
 557.0	-75
-10496.0	-67
--15920.0	-64
--10462.0	-64
--9842.0	-64
--8080.0	-64
--6907.0	-64
--4803.0	-64
+7705.0	-88
+9452.0	-76
+NULL	-32768

http://git-wip-us.apache.org/repos/asf/hive/blob/ec8c390e/ql/src/test/results/clientpositive/llap/vectorization_offset_limit.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vectorization_offset_limit.q.out b/ql/src/test/results/clientpositive/llap/vectorization_offset_limit.q.out
index f1fe221..bdfe78e 100644
--- a/ql/src/test/results/clientpositive/llap/vectorization_offset_limit.q.out
+++ b/ql/src/test/results/clientpositive/llap/vectorization_offset_limit.q.out
@@ -116,10 +116,9 @@ STAGE PLANS:
                         key expressions: _col0 (type: tinyint), _col1 (type: double)
                         sort order: ++
                         Reduce Sink Vectorization:
-                            className: VectorReduceSinkOperator
-                            native: false
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                            nativeConditionsNotMet: No TopN IS false
+                            className: VectorReduceSinkObjectHashOperator
+                            native: true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 9173 Data size: 109584 Basic stats: COMPLETE Column stats: COMPLETE
                         TopN Hash Memory Usage: 0.1
                         value expressions: _col2 (type: smallint)
@@ -130,7 +129,7 @@ STAGE PLANS:
                 enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
                 groupByVectorOutput: true
                 inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
+                allNative: true
                 usesVectorUDFAdaptor: false
                 vectorized: true
         Reducer 2 

http://git-wip-us.apache.org/repos/asf/hive/blob/ec8c390e/ql/src/test/results/clientpositive/llap/vectorization_part_project.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vectorization_part_project.q.out b/ql/src/test/results/clientpositive/llap/vectorization_part_project.q.out
index 872e7f3..85c4dd0 100644
--- a/ql/src/test/results/clientpositive/llap/vectorization_part_project.q.out
+++ b/ql/src/test/results/clientpositive/llap/vectorization_part_project.q.out
@@ -87,7 +87,7 @@ STAGE PLANS:
                 enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
                 groupByVectorOutput: true
                 inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
+                allNative: true
                 usesVectorUDFAdaptor: false
                 vectorized: true
         Reducer 2 


[25/50] [abbrv] hive git commit: HIVE-16275: Vectorization: Add ReduceSink support for TopN (in specialized native classes) (Matt McCline, reviewed by Gopal Vijayaraghavan)

Posted by we...@apache.org.
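For context on the repeated plan changes in the diffs below: the q.out updates replace the non-native VectorReduceSinkOperator (previously forced by the "No TopN IS false" condition whenever a LIMIT pushed a TopN into the sink) with the specialized native sinks such as VectorReduceSinkObjectHashOperator, VectorReduceSinkLongOperator, and VectorReduceSinkMultiKeyOperator, and the native condition is relaxed from "No TopN" to "No PTF TopN". The Java sketch below is an illustrative reconstruction of that selection logic only, not the actual Hive planner code; apart from the operator names visible in the diffs, every class, field, and method name in it is hypothetical.

// Hypothetical sketch only -- NOT the Hive source. It mirrors the effect the
// q.out diffs show: a plain TopN reduce sink no longer disqualifies the
// native specialized operators; only a PTF TopN still does.
public class ReduceSinkNativeSelectionSketch {

  // Assumed descriptor of a ReduceSink in the plan (hypothetical).
  static class ReduceSinkInfo {
    boolean newReduceSinkEnabled;  // hive.vectorized.execution.reducesink.new.enabled
    boolean tezOrSpark;            // execution engine is tez or spark
    boolean hasTopN;               // a LIMIT/TopN hash pushed into the sink
    boolean isPtfTopN;             // TopN belonging to a PTF/windowing stage
    boolean hasDistinctColumns;    // DISTINCT aggregation columns present
    boolean singleLongKey;         // key is a single long-family column
  }

  static String chooseOperator(ReduceSinkInfo rs) {
    boolean nativeOk =
        rs.newReduceSinkEnabled
        && rs.tezOrSpark
        && !rs.isPtfTopN           // before HIVE-16275 this was: !rs.hasTopN
        && !rs.hasDistinctColumns;
    if (!nativeOk) {
      return "VectorReduceSinkOperator";        // non-native fallback
    }
    if (rs.singleLongKey) {
      return "VectorReduceSinkLongOperator";    // specialized single-long-key sink
    }
    // Other specialized cases (e.g. VectorReduceSinkMultiKeyOperator) omitted
    // for brevity; fall back to the general object-hash specialization.
    return "VectorReduceSinkObjectHashOperator";
  }

  public static void main(String[] args) {
    ReduceSinkInfo topNSink = new ReduceSinkInfo();
    topNSink.newReduceSinkEnabled = true;
    topNSink.tezOrSpark = true;
    topNSink.hasTopN = true;   // e.g. ORDER BY ... LIMIT 20
    // With the relaxed condition, a plain TopN sink now vectorizes natively:
    System.out.println(chooseOperator(topNSink)); // VectorReduceSinkObjectHashOperator
  }
}

Under these assumptions, the "allNative: false" to "allNative: true" flips in the Map Vectorization summaries follow directly: once the reduce sink itself is native, no non-native operator remains in those map-side pipelines.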
http://git-wip-us.apache.org/repos/asf/hive/blob/ec8c390e/ql/src/test/results/clientpositive/llap/vectorization_short_regress.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vectorization_short_regress.q.out b/ql/src/test/results/clientpositive/llap/vectorization_short_regress.q.out
index f0b28fa..c141e36 100644
--- a/ql/src/test/results/clientpositive/llap/vectorization_short_regress.q.out
+++ b/ql/src/test/results/clientpositive/llap/vectorization_short_regress.q.out
@@ -1022,10 +1022,9 @@ STAGE PLANS:
                         key expressions: _col0 (type: int), _col1 (type: double), _col2 (type: timestamp), _col3 (type: string), _col4 (type: boolean), _col5 (type: tinyint), _col6 (type: float), _col7 (type: timestamp), _col8 (type: smallint), _col9 (type: bigint), _col10 (type: bigint), _col11 (type: int), _col12 (type: decimal(14,3)), _col13 (type: smallint), _col14 (type: smallint), _col15 (type: smallint), _col16 (type: double), _col17 (type: decimal(15,3)), _col18 (type: float), _col19 (type: double), _col20 (type: double), _col21 (type: tinyint), _col22 (type: decimal(9,7))
                         sort order: +++++++++++++++++++++++
                         Reduce Sink Vectorization:
-                            className: VectorReduceSinkOperator
-                            native: false
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                            nativeConditionsNotMet: No TopN IS false
+                            className: VectorReduceSinkObjectHashOperator
+                            native: true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 9898 Data size: 5632662 Basic stats: COMPLETE Column stats: COMPLETE
                         TopN Hash Memory Usage: 0.1
             Execution mode: vectorized, llap
@@ -1035,7 +1034,7 @@ STAGE PLANS:
                 enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
                 groupByVectorOutput: true
                 inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
+                allNative: true
                 usesVectorUDFAdaptor: false
                 vectorized: true
         Reducer 2 
@@ -1321,10 +1320,9 @@ STAGE PLANS:
                         key expressions: _col0 (type: int), _col1 (type: bigint), _col2 (type: string), _col3 (type: boolean), _col4 (type: float), _col5 (type: double), _col6 (type: timestamp), _col7 (type: smallint), _col8 (type: string), _col9 (type: boolean), _col10 (type: double), _col11 (type: decimal(5,3)), _col12 (type: double), _col13 (type: float), _col14 (type: float), _col15 (type: float), _col16 (type: float), _col17 (type: double), _col18 (type: double), _col19 (type: bigint), _col20 (type: double), _col21 (type: smallint), _col22 (type: bigint), _col23 (type: double), _col24 (type: smallint)
                         sort order: +++++++++++++++++++++++++
                         Reduce Sink Vectorization:
-                            className: VectorReduceSinkOperator
-                            native: false
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                            nativeConditionsNotMet: No TopN IS false
+                            className: VectorReduceSinkObjectHashOperator
+                            native: true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 8195 Data size: 3349694 Basic stats: COMPLETE Column stats: COMPLETE
                         TopN Hash Memory Usage: 0.1
             Execution mode: vectorized, llap
@@ -1334,7 +1332,7 @@ STAGE PLANS:
                 enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
                 groupByVectorOutput: true
                 inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
+                allNative: true
                 usesVectorUDFAdaptor: false
                 vectorized: true
         Reducer 2 
@@ -1569,10 +1567,9 @@ STAGE PLANS:
                         key expressions: _col8 (type: boolean), _col1 (type: string), _col3 (type: timestamp), _col5 (type: float), _col6 (type: bigint), _col1 (type: string), _col4 (type: double), _col0 (type: int), _col7 (type: smallint), _col4 (type: double), _col9 (type: int), _col10 (type: bigint), _col11 (type: bigint), _col12 (type: float), _col13 (type: bigint), _col14 (type: double), _col15 (type: double), _col16 (type: bigint), _col17 (type: double), _col18 (type: decimal(8,7)), _col19 (type: double), _col20 (type: smallint), _col21 (type: int)
                         sort order: +++++++++++++++++++++++
                         Reduce Sink Vectorization:
-                            className: VectorReduceSinkOperator
-                            native: false
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                            nativeConditionsNotMet: No TopN IS false
+                            className: VectorReduceSinkObjectHashOperator
+                            native: true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 10922 Data size: 3594034 Basic stats: COMPLETE Column stats: COMPLETE
                         TopN Hash Memory Usage: 0.1
                         value expressions: _col2 (type: boolean)
@@ -1583,7 +1580,7 @@ STAGE PLANS:
                 enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
                 groupByVectorOutput: true
                 inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
+                allNative: true
                 usesVectorUDFAdaptor: false
                 vectorized: true
         Reducer 2 
@@ -1875,10 +1872,9 @@ STAGE PLANS:
                         key expressions: _col5 (type: smallint), _col1 (type: string), _col2 (type: double), _col3 (type: float), _col4 (type: bigint), _col6 (type: double), _col7 (type: int), _col8 (type: float), _col9 (type: double), _col10 (type: double), _col11 (type: double), _col12 (type: float), _col13 (type: int), _col14 (type: double), _col15 (type: double)
                         sort order: +++++++++++++++
                         Reduce Sink Vectorization:
-                            className: VectorReduceSinkOperator
-                            native: false
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                            nativeConditionsNotMet: No TopN IS false
+                            className: VectorReduceSinkObjectHashOperator
+                            native: true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 3868 Data size: 748844 Basic stats: COMPLETE Column stats: COMPLETE
                         TopN Hash Memory Usage: 0.1
                         value expressions: _col0 (type: timestamp)
@@ -1889,7 +1885,7 @@ STAGE PLANS:
                 enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
                 groupByVectorOutput: true
                 inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
+                allNative: true
                 usesVectorUDFAdaptor: false
                 vectorized: true
         Reducer 2 
@@ -3286,7 +3282,7 @@ STAGE PLANS:
                         Reduce Sink Vectorization:
                             className: VectorReduceSinkObjectHashOperator
                             native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
                         value expressions: _col0 (type: bigint)
             Execution mode: vectorized, llap
@@ -3400,7 +3396,7 @@ STAGE PLANS:
                         Reduce Sink Vectorization:
                             className: VectorReduceSinkObjectHashOperator
                             native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col0 (type: bigint)
             Execution mode: vectorized, llap
@@ -3586,7 +3582,7 @@ STAGE PLANS:
                         Reduce Sink Vectorization:
                             className: VectorReduceSinkObjectHashOperator
                             native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
                         value expressions: _col0 (type: bigint)
             Execution mode: vectorized, llap
@@ -3700,7 +3696,7 @@ STAGE PLANS:
                         Reduce Sink Vectorization:
                             className: VectorReduceSinkObjectHashOperator
                             native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col0 (type: bigint)
             Execution mode: vectorized, llap
@@ -3814,7 +3810,7 @@ STAGE PLANS:
                         Reduce Sink Vectorization:
                             className: VectorReduceSinkObjectHashOperator
                             native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col0 (type: bigint)
             Execution mode: vectorized, llap
@@ -3928,7 +3924,7 @@ STAGE PLANS:
                         Reduce Sink Vectorization:
                             className: VectorReduceSinkObjectHashOperator
                             native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col0 (type: bigint)
             Execution mode: vectorized, llap
@@ -4042,7 +4038,7 @@ STAGE PLANS:
                         Reduce Sink Vectorization:
                             className: VectorReduceSinkObjectHashOperator
                             native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col0 (type: bigint)
             Execution mode: vectorized, llap
@@ -4156,7 +4152,7 @@ STAGE PLANS:
                         Reduce Sink Vectorization:
                             className: VectorReduceSinkObjectHashOperator
                             native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col0 (type: bigint)
             Execution mode: vectorized, llap

http://git-wip-us.apache.org/repos/asf/hive/blob/ec8c390e/ql/src/test/results/clientpositive/llap/vectorized_bucketmapjoin1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vectorized_bucketmapjoin1.q.out b/ql/src/test/results/clientpositive/llap/vectorized_bucketmapjoin1.q.out
index 3f0afcb..855d2e8 100644
--- a/ql/src/test/results/clientpositive/llap/vectorized_bucketmapjoin1.q.out
+++ b/ql/src/test/results/clientpositive/llap/vectorized_bucketmapjoin1.q.out
@@ -138,7 +138,7 @@ STAGE PLANS:
                       Reduce Sink Vectorization:
                           className: VectorReduceSinkLongOperator
                           native: true
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                       Statistics: Num rows: 2 Data size: 208 Basic stats: COMPLETE Column stats: NONE
                       value expressions: value (type: string)
             Execution mode: vectorized, llap
@@ -173,7 +173,7 @@ STAGE PLANS:
                       Reduce Sink Vectorization:
                           className: VectorReduceSinkLongOperator
                           native: true
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                       Statistics: Num rows: 2 Data size: 208 Basic stats: COMPLETE Column stats: NONE
                       value expressions: value (type: string)
             Execution mode: vectorized, llap
@@ -273,7 +273,7 @@ STAGE PLANS:
                       Reduce Sink Vectorization:
                           className: VectorReduceSinkLongOperator
                           native: true
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                       Statistics: Num rows: 2 Data size: 208 Basic stats: COMPLETE Column stats: NONE
                       value expressions: value (type: string)
             Execution mode: vectorized, llap
@@ -393,7 +393,7 @@ STAGE PLANS:
                       Reduce Sink Vectorization:
                           className: VectorReduceSinkLongOperator
                           native: true
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                       Statistics: Num rows: 2 Data size: 208 Basic stats: COMPLETE Column stats: NONE
                       value expressions: value (type: string)
             Execution mode: vectorized, llap
@@ -428,7 +428,7 @@ STAGE PLANS:
                       Reduce Sink Vectorization:
                           className: VectorReduceSinkLongOperator
                           native: true
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                       Statistics: Num rows: 2 Data size: 52 Basic stats: COMPLETE Column stats: NONE
                       value expressions: value (type: string)
             Execution mode: vectorized, llap

http://git-wip-us.apache.org/repos/asf/hive/blob/ec8c390e/ql/src/test/results/clientpositive/llap/vectorized_case.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vectorized_case.q.out b/ql/src/test/results/clientpositive/llap/vectorized_case.q.out
index 940b36e..ab75515 100644
--- a/ql/src/test/results/clientpositive/llap/vectorized_case.q.out
+++ b/ql/src/test/results/clientpositive/llap/vectorized_case.q.out
@@ -301,7 +301,7 @@ STAGE PLANS:
                         Reduce Sink Vectorization:
                             className: VectorReduceSinkObjectHashOperator
                             native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE
                         value expressions: _col0 (type: bigint), _col1 (type: bigint)
             Execution mode: vectorized, llap
@@ -428,7 +428,7 @@ STAGE PLANS:
                         Reduce Sink Vectorization:
                             className: VectorReduceSinkObjectHashOperator
                             native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE
                         value expressions: _col0 (type: bigint), _col1 (type: bigint)
             Execution mode: vectorized, llap

http://git-wip-us.apache.org/repos/asf/hive/blob/ec8c390e/ql/src/test/results/clientpositive/llap/vectorized_date_funcs.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vectorized_date_funcs.q.out b/ql/src/test/results/clientpositive/llap/vectorized_date_funcs.q.out
index c98ea9c..952c82a 100644
--- a/ql/src/test/results/clientpositive/llap/vectorized_date_funcs.q.out
+++ b/ql/src/test/results/clientpositive/llap/vectorized_date_funcs.q.out
@@ -1272,7 +1272,7 @@ STAGE PLANS:
                         Reduce Sink Vectorization:
                             className: VectorReduceSinkObjectHashOperator
                             native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 1 Data size: 128 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col0 (type: date), _col1 (type: date), _col2 (type: bigint), _col3 (type: bigint)
             Execution mode: vectorized, llap
@@ -1312,7 +1312,7 @@ STAGE PLANS:
                   Reduce Sink Vectorization:
                       className: VectorReduceSinkObjectHashOperator
                       native: true
-                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                   Statistics: Num rows: 1 Data size: 128 Basic stats: COMPLETE Column stats: NONE
                   value expressions: _col1 (type: date), _col2 (type: bigint), _col3 (type: bigint)
         Reducer 3 

http://git-wip-us.apache.org/repos/asf/hive/blob/ec8c390e/ql/src/test/results/clientpositive/llap/vectorized_dynamic_semijoin_reduction.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vectorized_dynamic_semijoin_reduction.q.out b/ql/src/test/results/clientpositive/llap/vectorized_dynamic_semijoin_reduction.q.out
index 5a7a101..8f83ee2 100644
--- a/ql/src/test/results/clientpositive/llap/vectorized_dynamic_semijoin_reduction.q.out
+++ b/ql/src/test/results/clientpositive/llap/vectorized_dynamic_semijoin_reduction.q.out
@@ -78,7 +78,7 @@ STAGE PLANS:
                         Reduce Sink Vectorization:
                             className: VectorReduceSinkLongOperator
                             native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 500 Data size: 90000 Basic stats: COMPLETE Column stats: NONE
             Execution mode: vectorized, llap
             LLAP IO: all inputs
@@ -121,7 +121,7 @@ STAGE PLANS:
                         Reduce Sink Vectorization:
                             className: VectorReduceSinkLongOperator
                             native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 57 Data size: 10182 Basic stats: COMPLETE Column stats: NONE
                       Select Operator
                         expressions: _col0 (type: int)
@@ -147,7 +147,7 @@ STAGE PLANS:
                             Reduce Sink Vectorization:
                                 className: VectorReduceSinkObjectHashOperator
                                 native: true
-                                nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                                nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                             Statistics: Num rows: 1 Data size: 12 Basic stats: COMPLETE Column stats: NONE
                             value expressions: _col0 (type: int), _col1 (type: int), _col2 (type: binary)
             Execution mode: vectorized, llap
@@ -240,7 +240,7 @@ STAGE PLANS:
                   Reduce Sink Vectorization:
                       className: VectorReduceSinkObjectHashOperator
                       native: true
-                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                   Statistics: Num rows: 1 Data size: 12 Basic stats: COMPLETE Column stats: NONE
                   value expressions: _col0 (type: int), _col1 (type: int), _col2 (type: binary)
 
@@ -315,7 +315,7 @@ STAGE PLANS:
                         Reduce Sink Vectorization:
                             className: VectorReduceSinkStringOperator
                             native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 500 Data size: 90000 Basic stats: COMPLETE Column stats: NONE
             Execution mode: vectorized, llap
             LLAP IO: all inputs
@@ -358,7 +358,7 @@ STAGE PLANS:
                         Reduce Sink Vectorization:
                             className: VectorReduceSinkStringOperator
                             native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 57 Data size: 10182 Basic stats: COMPLETE Column stats: NONE
                       Select Operator
                         expressions: _col0 (type: string)
@@ -384,7 +384,7 @@ STAGE PLANS:
                             Reduce Sink Vectorization:
                                 className: VectorReduceSinkObjectHashOperator
                                 native: true
-                                nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                                nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                             Statistics: Num rows: 1 Data size: 552 Basic stats: COMPLETE Column stats: NONE
                             value expressions: _col0 (type: string), _col1 (type: string), _col2 (type: binary)
             Execution mode: vectorized, llap
@@ -477,7 +477,7 @@ STAGE PLANS:
                   Reduce Sink Vectorization:
                       className: VectorReduceSinkObjectHashOperator
                       native: true
-                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                   Statistics: Num rows: 1 Data size: 552 Basic stats: COMPLETE Column stats: NONE
                   value expressions: _col0 (type: string), _col1 (type: string), _col2 (type: binary)
 
@@ -552,7 +552,7 @@ STAGE PLANS:
                         Reduce Sink Vectorization:
                             className: VectorReduceSinkStringOperator
                             native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 500 Data size: 90000 Basic stats: COMPLETE Column stats: NONE
             Execution mode: vectorized, llap
             LLAP IO: all inputs
@@ -595,7 +595,7 @@ STAGE PLANS:
                         Reduce Sink Vectorization:
                             className: VectorReduceSinkStringOperator
                             native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 57 Data size: 10182 Basic stats: COMPLETE Column stats: NONE
                       Select Operator
                         expressions: _col0 (type: string)
@@ -621,7 +621,7 @@ STAGE PLANS:
                             Reduce Sink Vectorization:
                                 className: VectorReduceSinkObjectHashOperator
                                 native: true
-                                nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                                nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                             Statistics: Num rows: 1 Data size: 552 Basic stats: COMPLETE Column stats: NONE
                             value expressions: _col0 (type: string), _col1 (type: string), _col2 (type: binary)
             Execution mode: vectorized, llap
@@ -714,7 +714,7 @@ STAGE PLANS:
                   Reduce Sink Vectorization:
                       className: VectorReduceSinkObjectHashOperator
                       native: true
-                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                   Statistics: Num rows: 1 Data size: 552 Basic stats: COMPLETE Column stats: NONE
                   value expressions: _col0 (type: string), _col1 (type: string), _col2 (type: binary)
 
@@ -790,7 +790,7 @@ STAGE PLANS:
                         Reduce Sink Vectorization:
                             className: VectorReduceSinkLongOperator
                             native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 500 Data size: 90000 Basic stats: COMPLETE Column stats: NONE
             Execution mode: vectorized, llap
             LLAP IO: all inputs
@@ -833,7 +833,7 @@ STAGE PLANS:
                         Reduce Sink Vectorization:
                             className: VectorReduceSinkLongOperator
                             native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 57 Data size: 10182 Basic stats: COMPLETE Column stats: NONE
                       Select Operator
                         expressions: _col0 (type: int)
@@ -859,7 +859,7 @@ STAGE PLANS:
                             Reduce Sink Vectorization:
                                 className: VectorReduceSinkObjectHashOperator
                                 native: true
-                                nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                                nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                             Statistics: Num rows: 1 Data size: 12 Basic stats: COMPLETE Column stats: NONE
                             value expressions: _col0 (type: int), _col1 (type: int), _col2 (type: binary)
             Execution mode: vectorized, llap
@@ -903,7 +903,7 @@ STAGE PLANS:
                         Reduce Sink Vectorization:
                             className: VectorReduceSinkLongOperator
                             native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 57 Data size: 10182 Basic stats: COMPLETE Column stats: NONE
                       Select Operator
                         expressions: _col0 (type: int)
@@ -929,7 +929,7 @@ STAGE PLANS:
                             Reduce Sink Vectorization:
                                 className: VectorReduceSinkObjectHashOperator
                                 native: true
-                                nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                                nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                             Statistics: Num rows: 1 Data size: 12 Basic stats: COMPLETE Column stats: NONE
                             value expressions: _col0 (type: int), _col1 (type: int), _col2 (type: binary)
             Execution mode: vectorized, llap
@@ -1024,7 +1024,7 @@ STAGE PLANS:
                   Reduce Sink Vectorization:
                       className: VectorReduceSinkObjectHashOperator
                       native: true
-                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                   Statistics: Num rows: 1 Data size: 12 Basic stats: COMPLETE Column stats: NONE
                   value expressions: _col0 (type: int), _col1 (type: int), _col2 (type: binary)
         Reducer 7 
@@ -1053,7 +1053,7 @@ STAGE PLANS:
                   Reduce Sink Vectorization:
                       className: VectorReduceSinkObjectHashOperator
                       native: true
-                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                   Statistics: Num rows: 1 Data size: 12 Basic stats: COMPLETE Column stats: NONE
                   value expressions: _col0 (type: int), _col1 (type: int), _col2 (type: binary)
 
@@ -1129,7 +1129,7 @@ STAGE PLANS:
                         Reduce Sink Vectorization:
                             className: VectorReduceSinkMultiKeyOperator
                             native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 500 Data size: 90000 Basic stats: COMPLETE Column stats: NONE
             Execution mode: vectorized, llap
             LLAP IO: all inputs
@@ -1172,7 +1172,7 @@ STAGE PLANS:
                         Reduce Sink Vectorization:
                             className: VectorReduceSinkMultiKeyOperator
                             native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 57 Data size: 10182 Basic stats: COMPLETE Column stats: NONE
                       Select Operator
                         expressions: _col0 (type: string)
@@ -1198,7 +1198,7 @@ STAGE PLANS:
                             Reduce Sink Vectorization:
                                 className: VectorReduceSinkObjectHashOperator
                                 native: true
-                                nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                                nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                             Statistics: Num rows: 1 Data size: 552 Basic stats: COMPLETE Column stats: NONE
                             value expressions: _col0 (type: string), _col1 (type: string), _col2 (type: binary)
                       Select Operator
@@ -1225,7 +1225,7 @@ STAGE PLANS:
                             Reduce Sink Vectorization:
                                 className: VectorReduceSinkObjectHashOperator
                                 native: true
-                                nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                                nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                             Statistics: Num rows: 1 Data size: 12 Basic stats: COMPLETE Column stats: NONE
                             value expressions: _col0 (type: int), _col1 (type: int), _col2 (type: binary)
             Execution mode: vectorized, llap
@@ -1318,7 +1318,7 @@ STAGE PLANS:
                   Reduce Sink Vectorization:
                       className: VectorReduceSinkObjectHashOperator
                       native: true
-                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                   Statistics: Num rows: 1 Data size: 552 Basic stats: COMPLETE Column stats: NONE
                   value expressions: _col0 (type: string), _col1 (type: string), _col2 (type: binary)
         Reducer 6 
@@ -1347,7 +1347,7 @@ STAGE PLANS:
                   Reduce Sink Vectorization:
                       className: VectorReduceSinkObjectHashOperator
                       native: true
-                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                   Statistics: Num rows: 1 Data size: 12 Basic stats: COMPLETE Column stats: NONE
                   value expressions: _col0 (type: int), _col1 (type: int), _col2 (type: binary)
 
@@ -1422,7 +1422,7 @@ STAGE PLANS:
                         Reduce Sink Vectorization:
                             className: VectorReduceSinkLongOperator
                             native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 500 Data size: 90000 Basic stats: COMPLETE Column stats: NONE
             Execution mode: vectorized, llap
             LLAP IO: all inputs
@@ -1465,7 +1465,7 @@ STAGE PLANS:
                         Reduce Sink Vectorization:
                             className: VectorReduceSinkLongOperator
                             native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 29 Data size: 5180 Basic stats: COMPLETE Column stats: NONE
                       Select Operator
                         expressions: _col0 (type: int)
@@ -1491,7 +1491,7 @@ STAGE PLANS:
                             Reduce Sink Vectorization:
                                 className: VectorReduceSinkObjectHashOperator
                                 native: true
-                                nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                                nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                             Statistics: Num rows: 1 Data size: 12 Basic stats: COMPLETE Column stats: NONE
                             value expressions: _col0 (type: int), _col1 (type: int), _col2 (type: binary)
             Execution mode: vectorized, llap
@@ -1584,7 +1584,7 @@ STAGE PLANS:
                   Reduce Sink Vectorization:
                       className: VectorReduceSinkObjectHashOperator
                       native: true
-                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                   Statistics: Num rows: 1 Data size: 12 Basic stats: COMPLETE Column stats: NONE
                   value expressions: _col0 (type: int), _col1 (type: int), _col2 (type: binary)
 

http://git-wip-us.apache.org/repos/asf/hive/blob/ec8c390e/ql/src/test/results/clientpositive/llap/vectorized_mapjoin.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vectorized_mapjoin.q.out b/ql/src/test/results/clientpositive/llap/vectorized_mapjoin.q.out
index 1c72876..0f02856 100644
--- a/ql/src/test/results/clientpositive/llap/vectorized_mapjoin.q.out
+++ b/ql/src/test/results/clientpositive/llap/vectorized_mapjoin.q.out
@@ -125,7 +125,7 @@ STAGE PLANS:
                         Reduce Sink Vectorization:
                             className: VectorReduceSinkLongOperator
                             native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 9173 Data size: 27396 Basic stats: COMPLETE Column stats: COMPLETE
             Execution mode: vectorized, llap
             LLAP IO: all inputs

http://git-wip-us.apache.org/repos/asf/hive/blob/ec8c390e/ql/src/test/results/clientpositive/llap/vectorized_mapjoin2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vectorized_mapjoin2.q.out b/ql/src/test/results/clientpositive/llap/vectorized_mapjoin2.q.out
index 37eb47e..b96dace 100644
--- a/ql/src/test/results/clientpositive/llap/vectorized_mapjoin2.q.out
+++ b/ql/src/test/results/clientpositive/llap/vectorized_mapjoin2.q.out
@@ -103,7 +103,7 @@ STAGE PLANS:
                             Reduce Sink Vectorization:
                                 className: VectorReduceSinkObjectHashOperator
                                 native: true
-                                nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                                nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                             Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                             value expressions: _col0 (type: bigint)
             Execution mode: vectorized, llap
@@ -146,7 +146,7 @@ STAGE PLANS:
                         Reduce Sink Vectorization:
                             className: VectorReduceSinkLongOperator
                             native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 45 Data size: 181 Basic stats: COMPLETE Column stats: NONE
             Execution mode: vectorized, llap
             LLAP IO: all inputs

http://git-wip-us.apache.org/repos/asf/hive/blob/ec8c390e/ql/src/test/results/clientpositive/llap/vectorized_ptf.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vectorized_ptf.q.out b/ql/src/test/results/clientpositive/llap/vectorized_ptf.q.out
index df4b0d8..6086e03 100644
--- a/ql/src/test/results/clientpositive/llap/vectorized_ptf.q.out
+++ b/ql/src/test/results/clientpositive/llap/vectorized_ptf.q.out
@@ -156,7 +156,7 @@ STAGE PLANS:
                     Reduce Sink Vectorization:
                         className: VectorReduceSinkObjectHashOperator
                         native: true
-                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                     Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE Column stats: NONE
                     value expressions: p_size (type: int), p_retailprice (type: double)
             Execution mode: vectorized, llap
@@ -372,7 +372,7 @@ STAGE PLANS:
                       Reduce Sink Vectorization:
                           className: VectorReduceSinkLongOperator
                           native: true
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                       Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE Column stats: NONE
                       value expressions: p_name (type: string), p_mfgr (type: string), p_size (type: int)
             Execution mode: vectorized, llap
@@ -412,7 +412,7 @@ STAGE PLANS:
                       Reduce Sink Vectorization:
                           className: VectorReduceSinkLongOperator
                           native: true
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                       Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE Column stats: NONE
             Execution mode: vectorized, llap
             LLAP IO: all inputs
@@ -616,7 +616,7 @@ STAGE PLANS:
                     Reduce Sink Vectorization:
                         className: VectorReduceSinkObjectHashOperator
                         native: true
-                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                     Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE Column stats: NONE
                     value expressions: p_size (type: int)
             Execution mode: vectorized, llap
@@ -770,7 +770,7 @@ STAGE PLANS:
                     Reduce Sink Vectorization:
                         className: VectorReduceSinkObjectHashOperator
                         native: true
-                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                     Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE Column stats: NONE
                     value expressions: p_size (type: int), p_retailprice (type: double)
             Execution mode: vectorized, llap
@@ -984,7 +984,7 @@ STAGE PLANS:
                     Reduce Sink Vectorization:
                         className: VectorReduceSinkObjectHashOperator
                         native: true
-                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                     Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE Column stats: NONE
                     value expressions: p_size (type: int)
             Execution mode: vectorized, llap
@@ -1201,7 +1201,7 @@ STAGE PLANS:
                     Reduce Sink Vectorization:
                         className: VectorReduceSinkObjectHashOperator
                         native: true
-                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                     Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE Column stats: NONE
                     value expressions: p_size (type: int)
             Execution mode: vectorized, llap
@@ -1420,7 +1420,7 @@ STAGE PLANS:
                     Reduce Sink Vectorization:
                         className: VectorReduceSinkObjectHashOperator
                         native: true
-                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                     Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE Column stats: NONE
                     value expressions: p_partkey (type: int), p_brand (type: string), p_type (type: string), p_size (type: int), p_container (type: string), p_retailprice (type: double), p_comment (type: string)
             Execution mode: vectorized, llap
@@ -1460,7 +1460,7 @@ STAGE PLANS:
                       Reduce Sink Vectorization:
                           className: VectorReduceSinkLongOperator
                           native: true
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                       Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE Column stats: NONE
             Execution mode: vectorized, llap
             LLAP IO: all inputs
@@ -1634,7 +1634,7 @@ STAGE PLANS:
                       Reduce Sink Vectorization:
                           className: VectorReduceSinkLongOperator
                           native: true
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                       Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE Column stats: NONE
             Execution mode: vectorized, llap
             LLAP IO: all inputs
@@ -1666,7 +1666,7 @@ STAGE PLANS:
                     Reduce Sink Vectorization:
                         className: VectorReduceSinkObjectHashOperator
                         native: true
-                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                     Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE Column stats: NONE
                     value expressions: p_partkey (type: int), p_brand (type: string), p_type (type: string), p_size (type: int), p_container (type: string), p_retailprice (type: double), p_comment (type: string)
             Execution mode: vectorized, llap
@@ -2245,7 +2245,7 @@ STAGE PLANS:
                     Reduce Sink Vectorization:
                         className: VectorReduceSinkObjectHashOperator
                         native: true
-                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                     Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE Column stats: NONE
                     value expressions: p_size (type: int), p_retailprice (type: double)
             Execution mode: vectorized, llap
@@ -2458,7 +2458,7 @@ STAGE PLANS:
                     Reduce Sink Vectorization:
                         className: VectorReduceSinkObjectHashOperator
                         native: true
-                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                     Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE Column stats: NONE
                     value expressions: p_size (type: int), p_retailprice (type: double)
             Execution mode: vectorized, llap
@@ -2741,7 +2741,7 @@ STAGE PLANS:
                     Reduce Sink Vectorization:
                         className: VectorReduceSinkObjectHashOperator
                         native: true
-                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                     Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE Column stats: NONE
                     value expressions: p_size (type: int), p_retailprice (type: double)
             Execution mode: vectorized, llap
@@ -2958,7 +2958,7 @@ STAGE PLANS:
                     Reduce Sink Vectorization:
                         className: VectorReduceSinkObjectHashOperator
                         native: true
-                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                     Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE Column stats: NONE
                     value expressions: p_partkey (type: int), p_size (type: int), p_retailprice (type: double)
             Execution mode: vectorized, llap
@@ -2998,7 +2998,7 @@ STAGE PLANS:
                       Reduce Sink Vectorization:
                           className: VectorReduceSinkLongOperator
                           native: true
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                       Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE Column stats: NONE
             Execution mode: vectorized, llap
             LLAP IO: all inputs
@@ -3242,7 +3242,7 @@ STAGE PLANS:
                     Reduce Sink Vectorization:
                         className: VectorReduceSinkObjectHashOperator
                         native: true
-                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                     Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE Column stats: NONE
                     value expressions: p_size (type: int)
             Execution mode: vectorized, llap
@@ -3473,7 +3473,7 @@ STAGE PLANS:
                         Reduce Sink Vectorization:
                             className: VectorReduceSinkObjectHashOperator
                             native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col2 (type: double)
             Execution mode: vectorized, llap
@@ -3729,7 +3729,7 @@ STAGE PLANS:
                     Reduce Sink Vectorization:
                         className: VectorReduceSinkObjectHashOperator
                         native: true
-                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                     Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE Column stats: NONE
                     value expressions: p_size (type: int), p_retailprice (type: double)
             Execution mode: vectorized, llap
@@ -4165,7 +4165,7 @@ STAGE PLANS:
                     Reduce Sink Vectorization:
                         className: VectorReduceSinkStringOperator
                         native: true
-                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                     Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE Column stats: NONE
                     value expressions: p_name (type: string), p_size (type: int)
             Execution mode: vectorized, llap
@@ -4479,7 +4479,7 @@ STAGE PLANS:
                     Reduce Sink Vectorization:
                         className: VectorReduceSinkStringOperator
                         native: true
-                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                     Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE Column stats: NONE
                     value expressions: p_name (type: string), p_size (type: int)
             Execution mode: vectorized, llap
@@ -4789,7 +4789,7 @@ STAGE PLANS:
                     Reduce Sink Vectorization:
                         className: VectorReduceSinkMultiKeyOperator
                         native: true
-                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                     Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE Column stats: NONE
                     value expressions: p_size (type: int)
             Execution mode: vectorized, llap
@@ -5075,7 +5075,7 @@ STAGE PLANS:
                     Reduce Sink Vectorization:
                         className: VectorReduceSinkMultiKeyOperator
                         native: true
-                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                     Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE Column stats: NONE
                     value expressions: p_size (type: int)
             Execution mode: vectorized, llap
@@ -5404,7 +5404,7 @@ STAGE PLANS:
                     Reduce Sink Vectorization:
                         className: VectorReduceSinkMultiKeyOperator
                         native: true
-                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                     Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE Column stats: NONE
                     value expressions: p_size (type: int)
             Execution mode: vectorized, llap
@@ -5702,7 +5702,7 @@ STAGE PLANS:
                     Reduce Sink Vectorization:
                         className: VectorReduceSinkMultiKeyOperator
                         native: true
-                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                     Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE Column stats: NONE
                     value expressions: p_size (type: int)
             Execution mode: vectorized, llap

http://git-wip-us.apache.org/repos/asf/hive/blob/ec8c390e/ql/src/test/results/clientpositive/llap/vectorized_shufflejoin.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vectorized_shufflejoin.q.out b/ql/src/test/results/clientpositive/llap/vectorized_shufflejoin.q.out
index 7f04eba..3c972cc 100644
--- a/ql/src/test/results/clientpositive/llap/vectorized_shufflejoin.q.out
+++ b/ql/src/test/results/clientpositive/llap/vectorized_shufflejoin.q.out
@@ -54,7 +54,7 @@ STAGE PLANS:
                         Reduce Sink Vectorization:
                             className: VectorReduceSinkLongOperator
                             native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 9173 Data size: 27396 Basic stats: COMPLETE Column stats: COMPLETE
             Execution mode: vectorized, llap
             LLAP IO: all inputs
@@ -96,7 +96,7 @@ STAGE PLANS:
                         Reduce Sink Vectorization:
                             className: VectorReduceSinkLongOperator
                             native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 9173 Data size: 27396 Basic stats: COMPLETE Column stats: COMPLETE
             Execution mode: vectorized, llap
             LLAP IO: all inputs


[16/50] [abbrv] hive git commit: HIVE-16577: Syntax error in the metastore init scripts for mssql (Vihang Karajgaonkar, reviewed by Aihua Xu & Thejas M Nair)

Posted by we...@apache.org.
HIVE-16577: Syntax error in the metastore init scripts for mssql (Vihang Karajgaonkar, reviewed by Aihua Xu & Thejas M Nair)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/d09f3f81
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/d09f3f81
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/d09f3f81

Branch: refs/heads/hive-14535
Commit: d09f3f81d231e68727bbb39681a686c5f525114a
Parents: 1fecb81
Author: Aihua Xu <ai...@apache.org>
Authored: Fri May 5 14:00:51 2017 -0400
Committer: Aihua Xu <ai...@apache.org>
Committed: Fri May 5 14:00:51 2017 -0400

----------------------------------------------------------------------
 metastore/scripts/upgrade/mssql/hive-schema-2.2.0.mssql.sql | 2 +-
 metastore/scripts/upgrade/mssql/hive-schema-2.3.0.mssql.sql | 2 +-
 metastore/scripts/upgrade/mssql/hive-schema-3.0.0.mssql.sql | 2 +-
 3 files changed, 3 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/d09f3f81/metastore/scripts/upgrade/mssql/hive-schema-2.2.0.mssql.sql
----------------------------------------------------------------------
diff --git a/metastore/scripts/upgrade/mssql/hive-schema-2.2.0.mssql.sql b/metastore/scripts/upgrade/mssql/hive-schema-2.2.0.mssql.sql
index 57dd30f..33730de 100644
--- a/metastore/scripts/upgrade/mssql/hive-schema-2.2.0.mssql.sql
+++ b/metastore/scripts/upgrade/mssql/hive-schema-2.2.0.mssql.sql
@@ -579,7 +579,7 @@ CREATE TABLE NOTIFICATION_LOG
     EVENT_TYPE nvarchar(32) NOT NULL,
     DB_NAME nvarchar(128) NULL,
     TBL_NAME nvarchar(256) NULL,
-    MESSAGE_FORMAT nvarchar(16)
+    MESSAGE_FORMAT nvarchar(16),
     MESSAGE text NULL
 );
 

http://git-wip-us.apache.org/repos/asf/hive/blob/d09f3f81/metastore/scripts/upgrade/mssql/hive-schema-2.3.0.mssql.sql
----------------------------------------------------------------------
diff --git a/metastore/scripts/upgrade/mssql/hive-schema-2.3.0.mssql.sql b/metastore/scripts/upgrade/mssql/hive-schema-2.3.0.mssql.sql
index 8a80a50..c117a32 100644
--- a/metastore/scripts/upgrade/mssql/hive-schema-2.3.0.mssql.sql
+++ b/metastore/scripts/upgrade/mssql/hive-schema-2.3.0.mssql.sql
@@ -579,7 +579,7 @@ CREATE TABLE NOTIFICATION_LOG
     EVENT_TYPE nvarchar(32) NOT NULL,
     DB_NAME nvarchar(128) NULL,
     TBL_NAME nvarchar(256) NULL,
-    MESSAGE_FORMAT nvarchar(16)
+    MESSAGE_FORMAT nvarchar(16),
     MESSAGE text NULL
 );
 

http://git-wip-us.apache.org/repos/asf/hive/blob/d09f3f81/metastore/scripts/upgrade/mssql/hive-schema-3.0.0.mssql.sql
----------------------------------------------------------------------
diff --git a/metastore/scripts/upgrade/mssql/hive-schema-3.0.0.mssql.sql b/metastore/scripts/upgrade/mssql/hive-schema-3.0.0.mssql.sql
index 98682a8..54d593c 100644
--- a/metastore/scripts/upgrade/mssql/hive-schema-3.0.0.mssql.sql
+++ b/metastore/scripts/upgrade/mssql/hive-schema-3.0.0.mssql.sql
@@ -579,7 +579,7 @@ CREATE TABLE NOTIFICATION_LOG
     EVENT_TYPE nvarchar(32) NOT NULL,
     DB_NAME nvarchar(128) NULL,
     TBL_NAME nvarchar(256) NULL,
-    MESSAGE_FORMAT nvarchar(16)
+    MESSAGE_FORMAT nvarchar(16),
     MESSAGE text NULL
 );
 


[32/50] [abbrv] hive git commit: HIVE-16562: Issues with nullif / fetch task (Zoltan Haindrich, reviewed by Ashutosh Chauhan)

Posted by we...@apache.org.
HIVE-16562: Issues with nullif / fetch task (Zoltan Haindrich, reviewed by Ashutosh Chauhan)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/0f8840a3
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/0f8840a3
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/0f8840a3

Branch: refs/heads/hive-14535
Commit: 0f8840a31a73f7b3278735ea4fb9cd4b0f3ae8d3
Parents: ec8c390
Author: Zoltan Haindrich <ki...@rxd.hu>
Authored: Mon May 8 07:21:40 2017 +0200
Committer: Zoltan Haindrich <ki...@rxd.hu>
Committed: Mon May 8 07:44:07 2017 +0200

----------------------------------------------------------------------
 .../hive/ql/udf/generic/GenericUDFNullif.java   | 10 ++++--
 .../ql/udf/generic/TestGenericUDFNullif.java    | 20 +++++++++++
 ql/src/test/queries/clientpositive/udf_nullif.q | 11 ++++++
 .../results/clientpositive/udf_nullif.q.out     | 37 ++++++++++++++++++++
 4 files changed, 75 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/0f8840a3/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFNullif.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFNullif.java b/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFNullif.java
index 452c84e..5020ef8 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFNullif.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFNullif.java
@@ -86,16 +86,20 @@ public class GenericUDFNullif extends GenericUDF {
   public Object evaluate(DeferredObject[] arguments) throws HiveException {
     Object arg0 = arguments[0].get();
     Object arg1 = arguments[1].get();
+    Object value0 = null;
+    if (arg0 != null) {
+      value0 = returnOIResolver.convertIfNecessary(arg0, argumentOIs[0], false);
+    }
     if (arg0 == null || arg1 == null) {
-      return arg0;
+      return value0;
     }
     PrimitiveObjectInspector compareOI = (PrimitiveObjectInspector) returnOIResolver.get();
     if (PrimitiveObjectInspectorUtils.comparePrimitiveObjects(
-        arg0, compareOI,
+        value0, compareOI,
         returnOIResolver.convertIfNecessary(arg1, argumentOIs[1], false), compareOI)) {
       return null;
     }
-    return arg0;
+    return value0;
   }
 
   @Override
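
The essence of this fix is that the first argument is now converted to the common return type (via returnOIResolver.convertIfNecessary) before either the early NULL return or the equality comparison, so lazily-deserialized inputs — as produced when hive.fetch.task.conversion kicks in — are compared and returned by value rather than as raw lazy wrappers. Below is a deliberately simplified, hypothetical sketch of that convert-before-compare shape in plain Java; the class and helper names (NullIfSketch, toCommonType, materialize) are illustrative only and do not reproduce Hive's ObjectInspector machinery.

import java.util.Objects;
import java.util.function.UnaryOperator;

public class NullIfSketch {

    // Simplified NULLIF(arg0, arg1): convert the first argument up front, then
    // short-circuit on NULLs, then compare the converted values. This mirrors
    // the shape of the patched evaluate(), not its actual types.
    static Object nullIf(Object arg0, Object arg1, UnaryOperator<Object> toCommonType) {
        Object value0 = (arg0 == null) ? null : toCommonType.apply(arg0);
        if (arg0 == null || arg1 == null) {
            return value0;                      // return the converted value, never the raw wrapper
        }
        Object value1 = toCommonType.apply(arg1);
        return Objects.equals(value0, value1) ? null : value0;
    }

    public static void main(String[] args) {
        // Pretend StringBuilder is a "lazy" wrapper whose conversion materializes a String.
        UnaryOperator<Object> materialize = o -> o.toString();
        System.out.println(nullIf(new StringBuilder("1"), new StringBuilder("1"), materialize)); // null
        System.out.println(nullIf(new StringBuilder("1"), new StringBuilder("2"), materialize)); // 1
        System.out.println(nullIf(new StringBuilder("1"), null, materialize));                   // 1
    }
}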

http://git-wip-us.apache.org/repos/asf/hive/blob/0f8840a3/ql/src/test/org/apache/hadoop/hive/ql/udf/generic/TestGenericUDFNullif.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/udf/generic/TestGenericUDFNullif.java b/ql/src/test/org/apache/hadoop/hive/ql/udf/generic/TestGenericUDFNullif.java
index a66e63e..3e6efd4 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/udf/generic/TestGenericUDFNullif.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/udf/generic/TestGenericUDFNullif.java
@@ -24,6 +24,8 @@ import org.apache.hadoop.hive.ql.udf.generic.GenericUDF.DeferredJavaObject;
 import org.apache.hadoop.hive.ql.udf.generic.GenericUDF.DeferredObject;
 import org.apache.hadoop.hive.serde2.io.ByteWritable;
 import org.apache.hadoop.hive.serde2.io.DateWritable;
+import org.apache.hadoop.hive.serde2.lazy.LazyInteger;
+import org.apache.hadoop.hive.serde2.lazy.objectinspector.primitive.LazyPrimitiveObjectInspectorFactory;
 import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
 import org.apache.hadoop.hive.serde2.objectinspector.PrimitiveObjectInspector;
 import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory;
@@ -129,4 +131,22 @@ public class TestGenericUDFNullif {
     Assert.assertEquals(TypeInfoFactory.dateTypeInfo, oi.getTypeInfo());
     Assert.assertEquals(null, udf.evaluate(args));
   }
+
+  @Test
+  public void testLazy() throws HiveException {
+    GenericUDFNullif udf = new GenericUDFNullif();
+
+    ObjectInspector[] inputOIs = { LazyPrimitiveObjectInspectorFactory.LAZY_INT_OBJECT_INSPECTOR,
+        LazyPrimitiveObjectInspectorFactory.LAZY_INT_OBJECT_INSPECTOR };
+    LazyInteger a1 = new LazyInteger(LazyPrimitiveObjectInspectorFactory.LAZY_INT_OBJECT_INSPECTOR);
+    LazyInteger a2 = new LazyInteger(LazyPrimitiveObjectInspectorFactory.LAZY_INT_OBJECT_INSPECTOR);
+    a1.getWritableObject().set(1);
+    a2.getWritableObject().set(1);
+
+    DeferredObject[] args = { new DeferredJavaObject(a1), new DeferredJavaObject(a2) };
+
+    PrimitiveObjectInspector oi = (PrimitiveObjectInspector) udf.initialize(inputOIs);
+    Assert.assertEquals(TypeInfoFactory.intTypeInfo, oi.getTypeInfo());
+    Assert.assertEquals(null, udf.evaluate(args));
+  }
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/0f8840a3/ql/src/test/queries/clientpositive/udf_nullif.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/udf_nullif.q b/ql/src/test/queries/clientpositive/udf_nullif.q
index 8632837..00dfa23 100644
--- a/ql/src/test/queries/clientpositive/udf_nullif.q
+++ b/ql/src/test/queries/clientpositive/udf_nullif.q
@@ -16,3 +16,14 @@ select	nullif(date('2011-11-11'),date('2011-11-22'));
 select	nullif(1,null);
 
 select	nullif(1.0,1);
+
+
+set hive.fetch.task.conversion=more;
+
+drop table if exists t0;
+create table t0(a int,b int,c float,d double precision);
+insert into t0 values(1,2,3.1,4.1);
+select	nullif(a,b),
+	nullif(b,c),
+	nullif(c,d),
+	nullif(d,a) from t0;

http://git-wip-us.apache.org/repos/asf/hive/blob/0f8840a3/ql/src/test/results/clientpositive/udf_nullif.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/udf_nullif.q.out b/ql/src/test/results/clientpositive/udf_nullif.q.out
index 12cfbec..65827bc 100644
--- a/ql/src/test/results/clientpositive/udf_nullif.q.out
+++ b/ql/src/test/results/clientpositive/udf_nullif.q.out
@@ -168,3 +168,40 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: _dummy_database@_dummy_table
 #### A masked pattern was here ####
 NULL
+PREHOOK: query: drop table if exists t0
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: drop table if exists t0
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: create table t0(a int,b int,c float,d double precision)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@t0
+POSTHOOK: query: create table t0(a int,b int,c float,d double precision)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@t0
+PREHOOK: query: insert into t0 values(1,2,3.1,4.1)
+PREHOOK: type: QUERY
+PREHOOK: Output: default@t0
+POSTHOOK: query: insert into t0 values(1,2,3.1,4.1)
+POSTHOOK: type: QUERY
+POSTHOOK: Output: default@t0
+POSTHOOK: Lineage: t0.a EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
+POSTHOOK: Lineage: t0.b EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col2, type:string, comment:), ]
+POSTHOOK: Lineage: t0.c EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col3, type:string, comment:), ]
+POSTHOOK: Lineage: t0.d EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col4, type:string, comment:), ]
+PREHOOK: query: select	nullif(a,b),
+	nullif(b,c),
+	nullif(c,d),
+	nullif(d,a) from t0
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t0
+#### A masked pattern was here ####
+POSTHOOK: query: select	nullif(a,b),
+	nullif(b,c),
+	nullif(c,d),
+	nullif(d,a) from t0
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t0
+#### A masked pattern was here ####
+1	2	3.1	4.1


[31/50] [abbrv] hive git commit: HIVE-16275: Vectorization: Add ReduceSink support for TopN (in specialized native classes) (Matt McCline, reviewed by Gopal Vijayaraghavan)

Posted by we...@apache.org.
HIVE-16275: Vectorization: Add ReduceSink support for TopN (in specialized native classes) (Matt McCline, reviewed by Gopal Vijayaraghavan)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/ec8c390e
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/ec8c390e
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/ec8c390e

Branch: refs/heads/hive-14535
Commit: ec8c390ee52931169d0b63bae3f8b3886170e8b8
Parents: 9d4f13a
Author: Matt McCline <mm...@hortonworks.com>
Authored: Mon May 8 00:37:48 2017 -0500
Committer: Matt McCline <mm...@hortonworks.com>
Committed: Mon May 8 00:37:48 2017 -0500

----------------------------------------------------------------------
 .../VectorReduceSinkCommonOperator.java         |  51 ++-
 .../VectorReduceSinkObjectHashOperator.java     |   1 -
 .../ql/exec/vector/udf/VectorUDFAdaptor.java    |  23 ++
 .../hive/ql/optimizer/physical/Vectorizer.java  |   9 +-
 .../hadoop/hive/ql/plan/ReduceSinkDesc.java     |   4 +-
 .../hive/ql/plan/VectorReduceSinkDesc.java      |  10 +-
 .../clientpositive/vectorization_limit.q        |  14 +-
 .../llap/llap_vector_nohybridgrace.q.out        |   8 +-
 .../llap/vector_adaptor_usage_mode.q.out        |   2 +-
 .../llap/vector_auto_smb_mapjoin_14.q.out       |  10 +-
 .../llap/vector_between_columns.q.out           |   4 +-
 .../clientpositive/llap/vector_between_in.q.out |  32 +-
 .../llap/vector_binary_join_groupby.q.out       |  12 +-
 .../clientpositive/llap/vector_bucket.q.out     |   2 +-
 .../clientpositive/llap/vector_char_2.q.out     |  28 +-
 .../llap/vector_char_mapjoin1.q.out             |  12 +-
 .../llap/vector_char_simple.q.out               |  13 +-
 .../clientpositive/llap/vector_coalesce.q.out   |  45 +--
 .../clientpositive/llap/vector_coalesce_2.q.out |   2 +-
 .../clientpositive/llap/vector_count.q.out      |   8 +-
 .../llap/vector_count_distinct.q.out            |   4 +-
 .../clientpositive/llap/vector_data_types.q.out |   9 +-
 .../llap/vector_decimal_aggregate.q.out         |   2 +-
 .../llap/vector_decimal_expressions.q.out       |   9 +-
 .../llap/vector_decimal_mapjoin.q.out           |   2 +-
 .../llap/vector_decimal_round.q.out             |   8 +-
 .../llap/vector_decimal_round_2.q.out           |   8 +-
 .../clientpositive/llap/vector_distinct_2.q.out |   2 +-
 .../llap/vector_empty_where.q.out               |  16 +-
 .../clientpositive/llap/vector_groupby4.q.out   |   4 +-
 .../clientpositive/llap/vector_groupby6.q.out   |   4 +-
 .../clientpositive/llap/vector_groupby_3.q.out  |   2 +-
 .../llap/vector_groupby_grouping_id3.q.out      |   4 +-
 .../llap/vector_groupby_mapjoin.q.out           |  10 +-
 .../llap/vector_groupby_reduce.q.out            |  26 +-
 .../llap/vector_grouping_sets.q.out             |   4 +-
 .../clientpositive/llap/vector_if_expr.q.out    |   2 +-
 .../llap/vector_include_no_sel.q.out            |   4 +-
 .../clientpositive/llap/vector_inner_join.q.out |  18 +-
 .../clientpositive/llap/vector_interval_1.q.out |  16 +-
 .../clientpositive/llap/vector_interval_2.q.out |  20 +-
 .../llap/vector_interval_arithmetic.q.out       |  24 +-
 .../llap/vector_interval_mapjoin.q.out          |   2 +-
 .../clientpositive/llap/vector_join30.q.out     |  84 ++---
 .../llap/vector_join_part_col_char.q.out        |  18 +-
 .../llap/vector_left_outer_join2.q.out          |   8 +-
 .../llap/vector_leftsemi_mapjoin.q.out          | 376 +++++++++----------
 .../llap/vector_mapjoin_reduce.q.out            |  12 +-
 .../llap/vector_non_constant_in_expr.q.out      |   4 +-
 .../llap/vector_non_string_partition.q.out      |  18 +-
 .../llap/vector_nullsafe_join.q.out             |  36 +-
 .../llap/vector_number_compare_projection.q.out |   4 +-
 .../clientpositive/llap/vector_order_null.q.out |  22 +-
 .../clientpositive/llap/vector_orderby_5.q.out  |   4 +-
 .../llap/vector_outer_join0.q.out               |   4 +-
 .../llap/vector_outer_join1.q.out               |  10 +-
 .../llap/vector_outer_join2.q.out               |   6 +-
 .../llap/vector_partition_diff_num_cols.q.out   |  10 +-
 .../llap/vector_partitioned_date_time.q.out     |  60 ++-
 .../llap/vector_ptf_part_simple.q.out           |  30 +-
 .../clientpositive/llap/vector_reduce1.q.out    |   2 +-
 .../clientpositive/llap/vector_reduce2.q.out    |   2 +-
 .../clientpositive/llap/vector_reduce3.q.out    |   2 +-
 .../llap/vector_reduce_groupby_decimal.q.out    |  14 +-
 .../llap/vector_string_concat.q.out             |  14 +-
 .../llap/vector_string_decimal.q.out            |  24 +-
 .../llap/vector_tablesample_rows.q.out          |   2 +-
 .../llap/vector_varchar_simple.q.out            |  13 +-
 .../llap/vector_when_case_null.q.out            |   2 +-
 .../clientpositive/llap/vectorization_0.q.out   |  24 +-
 .../clientpositive/llap/vectorization_7.q.out   |  18 +-
 .../clientpositive/llap/vectorization_8.q.out   |  18 +-
 .../llap/vectorization_div0.q.out               |  18 +-
 .../llap/vectorization_limit.q.out              | 331 +++++++++-------
 .../llap/vectorization_offset_limit.q.out       |   9 +-
 .../llap/vectorization_part_project.q.out       |   2 +-
 .../llap/vectorization_short_regress.q.out      |  52 ++-
 .../llap/vectorized_bucketmapjoin1.q.out        |  10 +-
 .../clientpositive/llap/vectorized_case.q.out   |   4 +-
 .../llap/vectorized_date_funcs.q.out            |   4 +-
 .../vectorized_dynamic_semijoin_reduction.q.out |  58 +--
 .../llap/vectorized_mapjoin.q.out               |   2 +-
 .../llap/vectorized_mapjoin2.q.out              |   4 +-
 .../clientpositive/llap/vectorized_ptf.q.out    |  50 +--
 .../llap/vectorized_shufflejoin.q.out           |   4 +-
 .../llap/vectorized_timestamp_funcs.q.out       |  10 +-
 .../spark/vector_between_in.q.out               |  32 +-
 .../spark/vector_count_distinct.q.out           |   4 +-
 .../spark/vector_data_types.q.out               |   9 +-
 .../spark/vector_decimal_aggregate.q.out        |   2 +-
 .../spark/vector_distinct_2.q.out               |   2 +-
 .../clientpositive/spark/vector_groupby_3.q.out |   2 +-
 .../spark/vector_mapjoin_reduce.q.out           |   4 +-
 .../clientpositive/spark/vector_orderby_5.q.out |   4 +-
 .../spark/vector_outer_join1.q.out              |   2 +-
 .../spark/vector_outer_join2.q.out              |   2 +-
 .../spark/vector_string_concat.q.out            |  14 +-
 .../clientpositive/spark/vectorization_0.q.out  |  24 +-
 .../clientpositive/spark/vectorization_7.q.out  |  18 +-
 .../clientpositive/spark/vectorization_8.q.out  |  18 +-
 .../spark/vectorization_div0.q.out              |  18 +-
 .../spark/vectorization_part_project.q.out      |   2 +-
 .../spark/vectorization_short_regress.q.out     |  52 ++-
 .../clientpositive/spark/vectorized_case.q.out  |   4 +-
 .../clientpositive/spark/vectorized_ptf.q.out   |  50 +--
 .../spark/vectorized_shufflejoin.q.out          |   4 +-
 .../spark/vectorized_timestamp_funcs.q.out      |  10 +-
 .../tez/vector_non_string_partition.q.out       |  18 +-
 .../clientpositive/tez/vectorization_div0.q.out |  18 +-
 .../tez/vectorization_limit.q.out               | 281 ++++++++------
 .../vector_binary_join_groupby.q.out            |   8 +-
 .../results/clientpositive/vector_bucket.q.out  |   2 +-
 .../clientpositive/vector_cast_constant.q.out   |   4 +-
 .../results/clientpositive/vector_char_2.q.out  |  16 +-
 .../clientpositive/vector_char_mapjoin1.q.out   |   6 +-
 .../clientpositive/vector_char_simple.q.out     |   4 +-
 .../clientpositive/vector_coalesce.q.out        |  20 +-
 .../clientpositive/vector_coalesce_2.q.out      |   2 +-
 .../results/clientpositive/vector_count.q.out   |   8 +-
 .../clientpositive/vector_data_types.q.out      |   4 +-
 .../vector_decimal_aggregate.q.out              |   2 +-
 .../vector_decimal_expressions.q.out            |   4 +-
 .../clientpositive/vector_decimal_round.q.out   |   8 +-
 .../clientpositive/vector_decimal_round_2.q.out |   8 +-
 .../clientpositive/vector_distinct_2.q.out      |   2 +-
 .../clientpositive/vector_empty_where.q.out     |   8 +-
 .../clientpositive/vector_groupby4.q.out        |   4 +-
 .../clientpositive/vector_groupby6.q.out        |   4 +-
 .../clientpositive/vector_groupby_3.q.out       |   2 +-
 .../clientpositive/vector_groupby_mapjoin.q.out |   6 +-
 .../clientpositive/vector_groupby_reduce.q.out  |  20 +-
 .../clientpositive/vector_grouping_sets.q.out   |   4 +-
 .../results/clientpositive/vector_if_expr.q.out |   2 +-
 .../clientpositive/vector_include_no_sel.q.out  |   2 +-
 .../clientpositive/vector_interval_1.q.out      |  16 +-
 .../vector_interval_arithmetic.q.out            |  16 +-
 .../clientpositive/vector_mapjoin_reduce.q.out  |   4 +-
 .../vector_non_string_partition.q.out           |   8 +-
 .../clientpositive/vector_order_null.q.out      |  22 +-
 .../clientpositive/vector_orderby_5.q.out       |   4 +-
 .../clientpositive/vector_outer_join1.q.out     |   2 +-
 .../clientpositive/vector_outer_join2.q.out     |   2 +-
 .../clientpositive/vector_outer_join3.q.out     |   6 +-
 .../clientpositive/vector_outer_join4.q.out     |   2 +-
 .../results/clientpositive/vector_reduce1.q.out |   2 +-
 .../results/clientpositive/vector_reduce2.q.out |   2 +-
 .../results/clientpositive/vector_reduce3.q.out |   2 +-
 .../vector_reduce_groupby_decimal.q.out         |   8 +-
 .../clientpositive/vector_string_concat.q.out   |   8 +-
 .../vector_tablesample_rows.q.out               |   2 +-
 .../clientpositive/vector_varchar_simple.q.out  |   4 +-
 .../clientpositive/vector_when_case_null.q.out  |   2 +-
 .../clientpositive/vectorization_13.q.out       |   8 +-
 .../clientpositive/vectorization_7.q.out        |   8 +-
 .../clientpositive/vectorization_8.q.out        |   8 +-
 .../clientpositive/vectorization_div0.q.out     |   8 +-
 .../clientpositive/vectorization_limit.q.out    | 247 ++++++------
 .../vectorization_offset_limit.q.out            |   4 +-
 .../clientpositive/vectorized_case.q.out        |   4 +-
 .../clientpositive/vectorized_date_funcs.q.out  |   4 +-
 .../clientpositive/vectorized_mapjoin2.q.out    |   2 +-
 .../vectorized_parquet_types.q.out              |   2 +-
 .../clientpositive/vectorized_shufflejoin.q.out |   2 +-
 .../clientpositive/vectorized_timestamp.q.out   |   2 +-
 .../vectorized_timestamp_funcs.q.out            |  10 +-
 165 files changed, 1609 insertions(+), 1445 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/ec8c390e/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/reducesink/VectorReduceSinkCommonOperator.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/reducesink/VectorReduceSinkCommonOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/reducesink/VectorReduceSinkCommonOperator.java
index fc5aea5..a4dbe0b 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/reducesink/VectorReduceSinkCommonOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/reducesink/VectorReduceSinkCommonOperator.java
@@ -19,6 +19,7 @@
 package org.apache.hadoop.hive.ql.exec.vector.reducesink;
 
 import java.io.IOException;
+import java.io.Serializable;
 import java.util.Arrays;
 import java.util.Properties;
 
@@ -29,6 +30,7 @@ import org.apache.hadoop.hive.ql.CompilationOpContext;
 import org.apache.hadoop.hive.ql.exec.Operator;
 import org.apache.hadoop.hive.ql.exec.ReduceSinkOperator.Counter;
 import org.apache.hadoop.hive.ql.exec.TerminalOperator;
+import org.apache.hadoop.hive.ql.exec.TopNHash;
 import org.apache.hadoop.hive.ql.exec.Utilities;
 import org.apache.hadoop.hive.ql.exec.vector.VectorSerializeRow;
 import org.apache.hadoop.hive.ql.exec.vector.VectorizationContext;
@@ -57,11 +59,13 @@ import org.apache.hadoop.io.Writable;
 import org.apache.hadoop.mapred.OutputCollector;
 import org.apache.hive.common.util.HashCodeUtil;
 
+import com.google.common.base.Preconditions;
+
 /**
  * This class is common operator class for native vectorized reduce sink.
  */
 public abstract class VectorReduceSinkCommonOperator extends TerminalOperator<ReduceSinkDesc>
-    implements VectorizationContextRegion {
+    implements Serializable, TopNHash.BinaryCollector, VectorizationContextRegion {
 
   private static final long serialVersionUID = 1L;
   private static final String CLASS_NAME = VectorReduceSinkCommonOperator.class.getName();
@@ -122,6 +126,9 @@ public abstract class VectorReduceSinkCommonOperator extends TerminalOperator<Re
   protected transient HiveKey keyWritable;
   protected transient BytesWritable valueBytesWritable;
 
+  // Picks topN K:V pairs from input.
+  protected transient TopNHash reducerHash;
+
   // Where to write our key and value pairs.
   private transient OutputCollector out;
 
@@ -329,10 +336,46 @@ public abstract class VectorReduceSinkCommonOperator extends TerminalOperator<Re
 
     valueBytesWritable = new BytesWritable();
 
+    int limit = conf.getTopN();
+    float memUsage = conf.getTopNMemoryUsage();
+
+    if (limit >= 0 && memUsage > 0) {
+      reducerHash = new TopNHash();
+      reducerHash.initialize(limit, memUsage, conf.isMapGroupBy(), this, conf, hconf);
+    }
+
     batchCounter = 0;
   }
 
-  protected void collect(BytesWritable keyWritable, Writable valueWritable) throws IOException {
+  // The collect method override for TopNHash.BinaryCollector
+  @Override
+  public void collect(byte[] key, byte[] value, int hash) throws IOException {
+    HiveKey keyWritable = new HiveKey(key, hash);
+    BytesWritable valueWritable = new BytesWritable(value);
+    doCollect(keyWritable, valueWritable);
+  }
+
+  protected void collect(HiveKey keyWritable, BytesWritable valueWritable)
+      throws HiveException, IOException {
+    if (reducerHash != null) {
+      // NOTE: partColsIsNull is only used for PTF, which isn't supported yet.
+      final int firstIndex =
+          reducerHash.tryStoreKey(keyWritable, /* partColsIsNull */ false);
+
+      if (firstIndex == TopNHash.EXCLUDE) return;   // Nothing to do.
+
+      if (firstIndex == TopNHash.FORWARD) {
+        doCollect(keyWritable, valueWritable);
+      } else {
+        Preconditions.checkState(firstIndex >= 0);
+        reducerHash.storeValue(firstIndex, keyWritable.hashCode(), valueWritable, false);
+      }
+    } else {
+      doCollect(keyWritable, valueWritable);
+    }
+  }
+
+  private void doCollect(HiveKey keyWritable, BytesWritable valueWritable) throws IOException {
     // Since this is a terminal operator, update counters explicitly -
     // forward is not called
     if (null != out) {
@@ -360,8 +403,12 @@ public abstract class VectorReduceSinkCommonOperator extends TerminalOperator<Re
 
   @Override
   protected void closeOp(boolean abort) throws HiveException {
+    if (!abort && reducerHash != null) {
+      reducerHash.flush();
+    }
     super.closeOp(abort);
     out = null;
+    reducerHash = null;
     if (isLogInfoEnabled) {
       LOG.info(toString() + ": records written - " + numRows);
     }
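
The operator-level change above amounts to a top-N filter sitting in front of the collector: each key is offered to a bounded in-memory structure, rows that cannot make the top N are dropped, and whatever is retained is emitted when the operator closes. The following is a deliberately simplified, self-contained Java sketch of that keep-only-N-then-flush flow; the class and method names (TopNCollectorSketch, collect, flush) are illustrative and it does not reproduce Hive's TopNHash, its memory-usage accounting, or the FORWARD/EXCLUDE fast paths.

import java.util.AbstractMap.SimpleEntry;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.PriorityQueue;

public class TopNCollectorSketch {

    private final int limit;                                   // negative means "no top-N", forward everything
    private final List<Map.Entry<Long, String>> forwarded = new ArrayList<>();
    // Max-heap on the key so the currently-largest key is the first candidate for eviction.
    private final PriorityQueue<Map.Entry<Long, String>> retained =
        new PriorityQueue<>((a, b) -> Long.compare(b.getKey(), a.getKey()));

    TopNCollectorSketch(int limit) { this.limit = limit; }

    // Called once per key/value pair, analogous to collect() in the operator.
    void collect(long key, String value) {
        if (limit < 0) {                                       // no limit configured: emit directly
            forwarded.add(new SimpleEntry<>(key, value));
            return;
        }
        retained.offer(new SimpleEntry<>(key, value));
        if (retained.size() > limit) {
            retained.poll();                                   // over budget: drop the largest key seen so far
        }
    }

    // Called at operator close, analogous to flushing the hash in closeOp().
    List<Map.Entry<Long, String>> flush() {
        forwarded.addAll(retained);
        retained.clear();
        return forwarded;
    }

    public static void main(String[] args) {
        TopNCollectorSketch sink = new TopNCollectorSketch(2); // keep the 2 smallest keys
        sink.collect(30, "c");
        sink.collect(10, "a");
        sink.collect(20, "b");
        sink.collect(5, "e");
        System.out.println(sink.flush());                      // the pairs 5=e and 10=a, in heap order
    }
}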

http://git-wip-us.apache.org/repos/asf/hive/blob/ec8c390e/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/reducesink/VectorReduceSinkObjectHashOperator.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/reducesink/VectorReduceSinkObjectHashOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/reducesink/VectorReduceSinkObjectHashOperator.java
index 6312c44..bd7d6cb 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/reducesink/VectorReduceSinkObjectHashOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/reducesink/VectorReduceSinkObjectHashOperator.java
@@ -177,7 +177,6 @@ public class VectorReduceSinkObjectHashOperator extends VectorReduceSinkCommonOp
       nonPartitionRandom = new Random(12345);
     } else {
       partitionObjectInspectors = getObjectInspectorArray(reduceSinkPartitionTypeInfos);
-      LOG.debug("*NEW* partitionObjectInspectors " + Arrays.toString(partitionObjectInspectors));
       partitionVectorExtractRow = new VectorExtractRow();
       partitionVectorExtractRow.init(reduceSinkPartitionTypeInfos, reduceSinkPartitionColumnMap);
       partitionFieldValues = new Object[reduceSinkPartitionTypeInfos.length];

http://git-wip-us.apache.org/repos/asf/hive/blob/ec8c390e/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/udf/VectorUDFAdaptor.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/udf/VectorUDFAdaptor.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/udf/VectorUDFAdaptor.java
index 0bb0f22..3b3624d 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/udf/VectorUDFAdaptor.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/udf/VectorUDFAdaptor.java
@@ -20,6 +20,8 @@ package org.apache.hadoop.hive.ql.exec.vector.udf;
 import java.sql.Date;
 import java.sql.Timestamp;
 
+import org.apache.hadoop.hive.common.type.HiveIntervalYearMonth;
+import org.apache.hadoop.hive.common.type.HiveIntervalDayTime;
 import org.apache.hadoop.hive.common.type.HiveDecimal;
 import org.apache.hadoop.hive.ql.exec.MapredContext;
 import org.apache.hadoop.hive.ql.exec.UDFArgumentException;
@@ -39,6 +41,8 @@ import org.apache.hadoop.hive.serde2.objectinspector.primitive.*;
 import org.apache.hadoop.hive.serde2.typeinfo.CharTypeInfo;
 import org.apache.hadoop.hive.serde2.typeinfo.VarcharTypeInfo;
 import org.apache.hadoop.hive.serde2.objectinspector.primitive.WritableBinaryObjectInspector;
+import org.apache.hadoop.hive.serde2.io.HiveIntervalDayTimeWritable;
+import org.apache.hadoop.hive.serde2.io.HiveIntervalYearMonthWritable;
 import org.apache.hadoop.io.BytesWritable;
 import org.apache.hadoop.io.Text;
 
@@ -336,6 +340,25 @@ public class VectorUDFAdaptor extends VectorExpression {
       BytesWritable bw = (BytesWritable) value;
       BytesColumnVector bv = (BytesColumnVector) colVec;
       bv.setVal(i, bw.getBytes(), 0, bw.getLength());
+    } else if (outputOI instanceof WritableHiveIntervalYearMonthObjectInspector) {
+      LongColumnVector lv = (LongColumnVector) colVec;
+      HiveIntervalYearMonth iym;
+      if (value instanceof HiveIntervalYearMonth) {
+        iym = (HiveIntervalYearMonth) value;
+      } else {
+        iym = ((WritableHiveIntervalYearMonthObjectInspector) outputOI).getPrimitiveJavaObject(value);
+      }
+      long l = iym.getTotalMonths();
+      lv.vector[i] = l;
+    } else if (outputOI instanceof WritableHiveIntervalDayTimeObjectInspector) {
+      IntervalDayTimeColumnVector idtv = (IntervalDayTimeColumnVector) colVec;
+      HiveIntervalDayTime idt;
+      if (value instanceof HiveIntervalDayTime) {
+        idt = (HiveIntervalDayTime) value;
+      } else {
+        idt = ((WritableHiveIntervalDayTimeObjectInspector) outputOI).getPrimitiveJavaObject(value);
+      }
+      idtv.set(i, idt);
     } else {
       throw new RuntimeException("Unhandled object type " + outputOI.getTypeName() +
           " inspector class " + outputOI.getClass().getName() +

http://git-wip-us.apache.org/repos/asf/hive/blob/ec8c390e/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/Vectorizer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/Vectorizer.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/Vectorizer.java
index f0df2e9..2025c24 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/Vectorizer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/Vectorizer.java
@@ -3025,7 +3025,10 @@ public class Vectorizer implements PhysicalPlanResolver {
 
     String engine = HiveConf.getVar(hiveConf, HiveConf.ConfVars.HIVE_EXECUTION_ENGINE);
 
-    boolean hasTopN = (desc.getTopN() >= 0);
+    int limit = desc.getTopN();
+    float memUsage = desc.getTopNMemoryUsage();
+
+    boolean hasPTFTopN = (limit >= 0 && memUsage > 0 && desc.isPTFReduceSink());
 
     boolean hasDistinctColumns = (desc.getDistinctColumnIndices().size() > 0);
 
@@ -3202,7 +3205,7 @@ public class Vectorizer implements PhysicalPlanResolver {
     // Remember the condition variables for EXPLAIN regardless.
     vectorDesc.setIsVectorizationReduceSinkNativeEnabled(isVectorizationReduceSinkNativeEnabled);
     vectorDesc.setEngine(engine);
-    vectorDesc.setHasTopN(hasTopN);
+    vectorDesc.setHasPTFTopN(hasPTFTopN);
     vectorDesc.setHasDistinctColumns(hasDistinctColumns);
     vectorDesc.setIsKeyBinarySortable(isKeyBinarySortable);
     vectorDesc.setIsValueLazyBinary(isValueLazyBinary);
@@ -3215,7 +3218,7 @@ public class Vectorizer implements PhysicalPlanResolver {
     if (!isVectorizationReduceSinkNativeEnabled ||
         !isTezOrSpark ||
         (useUniformHash && (hasEmptyBuckets || hasNoPartitions)) ||
-        hasTopN ||
+        hasPTFTopN ||
         hasDistinctColumns ||
         !isKeyBinarySortable ||
         !isValueLazyBinary ||

http://git-wip-us.apache.org/repos/asf/hive/blob/ec8c390e/ql/src/java/org/apache/hadoop/hive/ql/plan/ReduceSinkDesc.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/ReduceSinkDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/ReduceSinkDesc.java
index 79d19b5..c059db6 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/plan/ReduceSinkDesc.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/ReduceSinkDesc.java
@@ -544,8 +544,8 @@ public class ReduceSinkDesc extends AbstractOperatorDesc {
               engineInSupported,
               engineInSupportedCondName),
           new VectorizationCondition(
-              !vectorReduceSinkDesc.getHasTopN(),
-              "No TopN"),
+              !vectorReduceSinkDesc.getHasPTFTopN(),
+              "No PTF TopN"),
           new VectorizationCondition(
               !vectorReduceSinkDesc.getHasDistinctColumns(),
               "No DISTINCT columns"),

http://git-wip-us.apache.org/repos/asf/hive/blob/ec8c390e/ql/src/java/org/apache/hadoop/hive/ql/plan/VectorReduceSinkDesc.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/VectorReduceSinkDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/VectorReduceSinkDesc.java
index d6230af..91d5be7 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/plan/VectorReduceSinkDesc.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/VectorReduceSinkDesc.java
@@ -64,7 +64,7 @@ public class VectorReduceSinkDesc extends AbstractVectorDesc  {
 
   private boolean isVectorizationReduceSinkNativeEnabled;
   private String engine;
-  private boolean hasTopN;
+  private boolean hasPTFTopN;
   private boolean hasDistinctColumns;
   private boolean isKeyBinarySortable;
   private boolean isValueLazyBinary;
@@ -85,11 +85,11 @@ public class VectorReduceSinkDesc extends AbstractVectorDesc  {
   public String getEngine() {
     return engine;
   }
-  public void setHasTopN(boolean hasTopN) {
-    this.hasTopN = hasTopN;
+  public void setHasPTFTopN(boolean hasPTFTopN) {
+    this.hasPTFTopN = hasPTFTopN;
   }
-  public boolean getHasTopN() {
-    return hasTopN;
+  public boolean getHasPTFTopN() {
+    return hasPTFTopN;
   }
   public void setHasDistinctColumns(boolean hasDistinctColumns) {
     this.hasDistinctColumns = hasDistinctColumns;

http://git-wip-us.apache.org/repos/asf/hive/blob/ec8c390e/ql/src/test/queries/clientpositive/vectorization_limit.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/vectorization_limit.q b/ql/src/test/queries/clientpositive/vectorization_limit.q
index a4c54f2..8044484 100644
--- a/ql/src/test/queries/clientpositive/vectorization_limit.q
+++ b/ql/src/test/queries/clientpositive/vectorization_limit.q
@@ -3,6 +3,8 @@ set hive.explain.user=false;
 SET hive.vectorized.execution.enabled=true;
 set hive.fetch.task.conversion=none;
 
+-- SORT_QUERY_RESULTS
+
 explain vectorization SELECT cbigint, cdouble FROM alltypesorc WHERE cbigint < cdouble and cint > 0 limit 7;
 SELECT cbigint, cdouble FROM alltypesorc WHERE cbigint < cdouble and cint > 0 limit 7;
 
@@ -11,31 +13,31 @@ set hive.limit.pushdown.memory.usage=0.3f;
 
 -- HIVE-3562 Some limit can be pushed down to map stage - c/p parts from limit_pushdown
 
-explain vectorization expression
+explain vectorization detail
 select ctinyint,cdouble,csmallint from alltypesorc where ctinyint is not null order by ctinyint,cdouble limit 20;
 select ctinyint,cdouble,csmallint from alltypesorc where ctinyint is not null order by ctinyint,cdouble limit 20;
 
 -- deduped RS
-explain vectorization expression
+explain vectorization detail
 select ctinyint,avg(cdouble + 1) from alltypesorc group by ctinyint order by ctinyint limit 20;
 select ctinyint,avg(cdouble + 1) from alltypesorc group by ctinyint order by ctinyint limit 20;
 
 -- distincts
-explain vectorization expression
+explain vectorization detail
 select distinct(ctinyint) from alltypesorc limit 20;
 select distinct(ctinyint) from alltypesorc limit 20;
 
-explain vectorization expression
+explain vectorization detail
 select ctinyint, count(distinct(cdouble)) from alltypesorc group by ctinyint order by ctinyint limit 20;
 select ctinyint, count(distinct(cdouble)) from alltypesorc group by ctinyint order by ctinyint limit 20;
 
 -- limit zero
-explain vectorization expression
+explain vectorization detail
 select ctinyint,cdouble from alltypesorc order by ctinyint limit 0;
 select ctinyint,cdouble from alltypesorc order by ctinyint limit 0;
 
 -- 2MR (applied to last RS)
-explain vectorization expression
+explain vectorization detail
 select cdouble, sum(ctinyint) as sum from alltypesorc where ctinyint is not null group by cdouble order by sum, cdouble limit 20;
 select cdouble, sum(ctinyint) as sum from alltypesorc where ctinyint is not null group by cdouble order by sum, cdouble limit 20;
 

http://git-wip-us.apache.org/repos/asf/hive/blob/ec8c390e/ql/src/test/results/clientpositive/llap/llap_vector_nohybridgrace.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/llap_vector_nohybridgrace.q.out b/ql/src/test/results/clientpositive/llap/llap_vector_nohybridgrace.q.out
index 526662d..57ddc96 100644
--- a/ql/src/test/results/clientpositive/llap/llap_vector_nohybridgrace.q.out
+++ b/ql/src/test/results/clientpositive/llap/llap_vector_nohybridgrace.q.out
@@ -83,7 +83,7 @@ STAGE PLANS:
                             Reduce Sink Vectorization:
                                 className: VectorReduceSinkObjectHashOperator
                                 native: true
-                                nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                                nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                             Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
                             value expressions: _col0 (type: bigint)
             Execution mode: vectorized, llap
@@ -126,7 +126,7 @@ STAGE PLANS:
                         Reduce Sink Vectorization:
                             className: VectorReduceSinkLongOperator
                             native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 12288 Data size: 36696 Basic stats: COMPLETE Column stats: COMPLETE
             Execution mode: vectorized, llap
             LLAP IO: all inputs
@@ -261,7 +261,7 @@ STAGE PLANS:
                             Reduce Sink Vectorization:
                                 className: VectorReduceSinkObjectHashOperator
                                 native: true
-                                nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                                nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                             Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
                             value expressions: _col0 (type: bigint)
             Execution mode: vectorized, llap
@@ -304,7 +304,7 @@ STAGE PLANS:
                         Reduce Sink Vectorization:
                             className: VectorReduceSinkLongOperator
                             native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 12288 Data size: 36696 Basic stats: COMPLETE Column stats: COMPLETE
             Execution mode: vectorized, llap
             LLAP IO: all inputs

http://git-wip-us.apache.org/repos/asf/hive/blob/ec8c390e/ql/src/test/results/clientpositive/llap/vector_adaptor_usage_mode.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_adaptor_usage_mode.q.out b/ql/src/test/results/clientpositive/llap/vector_adaptor_usage_mode.q.out
index 5b17144..a2ce365 100644
--- a/ql/src/test/results/clientpositive/llap/vector_adaptor_usage_mode.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_adaptor_usage_mode.q.out
@@ -1101,7 +1101,7 @@ STAGE PLANS:
                         Reduce Sink Vectorization:
                             className: VectorReduceSinkStringOperator
                             native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 5 Data size: 452 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col1 (type: bigint)
             Execution mode: vectorized, llap

http://git-wip-us.apache.org/repos/asf/hive/blob/ec8c390e/ql/src/test/results/clientpositive/llap/vector_auto_smb_mapjoin_14.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_auto_smb_mapjoin_14.q.out b/ql/src/test/results/clientpositive/llap/vector_auto_smb_mapjoin_14.q.out
index a0ac248..3b44bc3 100644
--- a/ql/src/test/results/clientpositive/llap/vector_auto_smb_mapjoin_14.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_auto_smb_mapjoin_14.q.out
@@ -287,7 +287,7 @@ STAGE PLANS:
                       Reduce Sink Vectorization:
                           className: VectorReduceSinkObjectHashOperator
                           native: true
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                       Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                       value expressions: _col0 (type: bigint)
         Reducer 3 
@@ -531,7 +531,7 @@ STAGE PLANS:
                   Reduce Sink Vectorization:
                       className: VectorReduceSinkLongOperator
                       native: true
-                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                   Statistics: Num rows: 5 Data size: 465 Basic stats: COMPLETE Column stats: NONE
                   value expressions: _col1 (type: bigint)
         Reducer 3 
@@ -586,7 +586,7 @@ STAGE PLANS:
                   Reduce Sink Vectorization:
                       className: VectorReduceSinkLongOperator
                       native: true
-                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                   Statistics: Num rows: 5 Data size: 465 Basic stats: COMPLETE Column stats: NONE
                   value expressions: _col1 (type: bigint)
 
@@ -1292,7 +1292,7 @@ STAGE PLANS:
                         Reduce Sink Vectorization:
                             className: VectorReduceSinkLongOperator
                             native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 10 Data size: 930 Basic stats: COMPLETE Column stats: NONE
             Execution mode: vectorized, llap
             LLAP IO: all inputs
@@ -1335,7 +1335,7 @@ STAGE PLANS:
                         Reduce Sink Vectorization:
                             className: VectorReduceSinkLongOperator
                             native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 10 Data size: 930 Basic stats: COMPLETE Column stats: NONE
             Execution mode: vectorized, llap
             LLAP IO: all inputs

http://git-wip-us.apache.org/repos/asf/hive/blob/ec8c390e/ql/src/test/results/clientpositive/llap/vector_between_columns.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_between_columns.q.out b/ql/src/test/results/clientpositive/llap/vector_between_columns.q.out
index cb9674c..d6f6ec8 100644
--- a/ql/src/test/results/clientpositive/llap/vector_between_columns.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_between_columns.q.out
@@ -164,7 +164,7 @@ STAGE PLANS:
                       Reduce Sink Vectorization:
                           className: VectorReduceSinkObjectHashOperator
                           native: true
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                       Statistics: Num rows: 5 Data size: 36 Basic stats: COMPLETE Column stats: NONE
                       value expressions: _col0 (type: int), _col1 (type: smallint)
             Execution mode: vectorized, llap
@@ -331,7 +331,7 @@ STAGE PLANS:
                       Reduce Sink Vectorization:
                           className: VectorReduceSinkObjectHashOperator
                           native: true
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                       Statistics: Num rows: 5 Data size: 36 Basic stats: COMPLETE Column stats: NONE
                       value expressions: _col0 (type: int), _col1 (type: smallint)
             Execution mode: vectorized, llap

http://git-wip-us.apache.org/repos/asf/hive/blob/ec8c390e/ql/src/test/results/clientpositive/llap/vector_between_in.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_between_in.q.out b/ql/src/test/results/clientpositive/llap/vector_between_in.q.out
index 9b58d47..ae1b2d2 100644
--- a/ql/src/test/results/clientpositive/llap/vector_between_in.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_between_in.q.out
@@ -61,7 +61,7 @@ STAGE PLANS:
                         Reduce Sink Vectorization:
                             className: VectorReduceSinkObjectHashOperator
                             native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 6144 Data size: 1233808 Basic stats: COMPLETE Column stats: NONE
             Execution mode: vectorized, llap
             LLAP IO: all inputs
@@ -165,7 +165,7 @@ STAGE PLANS:
                           Reduce Sink Vectorization:
                               className: VectorReduceSinkObjectHashOperator
                               native: true
-                              nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                              nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                           Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                           value expressions: _col0 (type: bigint)
             Execution mode: vectorized, llap
@@ -265,7 +265,7 @@ STAGE PLANS:
                         Reduce Sink Vectorization:
                             className: VectorReduceSinkObjectHashOperator
                             native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 6144 Data size: 1233808 Basic stats: COMPLETE Column stats: NONE
             Execution mode: vectorized, llap
             LLAP IO: all inputs
@@ -369,7 +369,7 @@ STAGE PLANS:
                           Reduce Sink Vectorization:
                               className: VectorReduceSinkObjectHashOperator
                               native: true
-                              nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                              nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                           Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                           value expressions: _col0 (type: bigint)
             Execution mode: vectorized, llap
@@ -469,7 +469,7 @@ STAGE PLANS:
                         Reduce Sink Vectorization:
                             className: VectorReduceSinkObjectHashOperator
                             native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 1365 Data size: 274112 Basic stats: COMPLETE Column stats: NONE
             Execution mode: vectorized, llap
             LLAP IO: all inputs
@@ -565,7 +565,7 @@ STAGE PLANS:
                         Reduce Sink Vectorization:
                             className: VectorReduceSinkObjectHashOperator
                             native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 10923 Data size: 2193503 Basic stats: COMPLETE Column stats: NONE
             Execution mode: vectorized, llap
             LLAP IO: all inputs
@@ -661,7 +661,7 @@ STAGE PLANS:
                         Reduce Sink Vectorization:
                             className: VectorReduceSinkObjectHashOperator
                             native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 1365 Data size: 274112 Basic stats: COMPLETE Column stats: NONE
             Execution mode: vectorized, llap
             LLAP IO: all inputs
@@ -765,7 +765,7 @@ STAGE PLANS:
                           Reduce Sink Vectorization:
                               className: VectorReduceSinkObjectHashOperator
                               native: true
-                              nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                              nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                           Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                           value expressions: _col0 (type: bigint)
             Execution mode: vectorized, llap
@@ -1119,7 +1119,7 @@ STAGE PLANS:
                         Reduce Sink Vectorization:
                             className: VectorReduceSinkLongOperator
                             native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 12288 Data size: 2467616 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col1 (type: bigint)
             Execution mode: vectorized, llap
@@ -1161,7 +1161,7 @@ STAGE PLANS:
                   Reduce Sink Vectorization:
                       className: VectorReduceSinkObjectHashOperator
                       native: true
-                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                   Statistics: Num rows: 6144 Data size: 1233808 Basic stats: COMPLETE Column stats: NONE
                   value expressions: _col1 (type: bigint)
         Reducer 3 
@@ -1257,7 +1257,7 @@ STAGE PLANS:
                         Reduce Sink Vectorization:
                             className: VectorReduceSinkLongOperator
                             native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 12288 Data size: 2467616 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col1 (type: bigint)
             Execution mode: vectorized, llap
@@ -1299,7 +1299,7 @@ STAGE PLANS:
                   Reduce Sink Vectorization:
                       className: VectorReduceSinkObjectHashOperator
                       native: true
-                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                   Statistics: Num rows: 6144 Data size: 1233808 Basic stats: COMPLETE Column stats: NONE
                   value expressions: _col1 (type: bigint)
         Reducer 3 
@@ -1395,7 +1395,7 @@ STAGE PLANS:
                         Reduce Sink Vectorization:
                             className: VectorReduceSinkLongOperator
                             native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 12288 Data size: 2467616 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col1 (type: bigint)
             Execution mode: vectorized, llap
@@ -1437,7 +1437,7 @@ STAGE PLANS:
                   Reduce Sink Vectorization:
                       className: VectorReduceSinkObjectHashOperator
                       native: true
-                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                   Statistics: Num rows: 6144 Data size: 1233808 Basic stats: COMPLETE Column stats: NONE
                   value expressions: _col1 (type: bigint)
         Reducer 3 
@@ -1533,7 +1533,7 @@ STAGE PLANS:
                         Reduce Sink Vectorization:
                             className: VectorReduceSinkLongOperator
                             native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 12288 Data size: 2467616 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col1 (type: bigint)
             Execution mode: vectorized, llap
@@ -1575,7 +1575,7 @@ STAGE PLANS:
                   Reduce Sink Vectorization:
                       className: VectorReduceSinkObjectHashOperator
                       native: true
-                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                   Statistics: Num rows: 6144 Data size: 1233808 Basic stats: COMPLETE Column stats: NONE
                   value expressions: _col1 (type: bigint)
         Reducer 3 

http://git-wip-us.apache.org/repos/asf/hive/blob/ec8c390e/ql/src/test/results/clientpositive/llap/vector_binary_join_groupby.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_binary_join_groupby.q.out b/ql/src/test/results/clientpositive/llap/vector_binary_join_groupby.q.out
index 339ec2c..a35659a 100644
--- a/ql/src/test/results/clientpositive/llap/vector_binary_join_groupby.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_binary_join_groupby.q.out
@@ -185,7 +185,7 @@ STAGE PLANS:
                               Reduce Sink Vectorization:
                                   className: VectorReduceSinkObjectHashOperator
                                   native: true
-                                  nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                                  nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                               Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                               value expressions: _col0 (type: bigint)
             Execution mode: vectorized, llap
@@ -228,7 +228,7 @@ STAGE PLANS:
                         Reduce Sink Vectorization:
                             className: VectorReduceSinkStringOperator
                             native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 100 Data size: 29638 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col0 (type: tinyint), _col1 (type: smallint), _col2 (type: int), _col3 (type: bigint), _col4 (type: float), _col5 (type: double), _col6 (type: boolean), _col7 (type: string), _col8 (type: timestamp), _col9 (type: decimal(4,2))
             Execution mode: vectorized, llap
@@ -268,7 +268,7 @@ STAGE PLANS:
                   Reduce Sink Vectorization:
                       className: VectorReduceSinkObjectHashOperator
                       native: true
-                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                   Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
         Reducer 3 
             Execution mode: vectorized, llap
@@ -383,7 +383,7 @@ STAGE PLANS:
                         Reduce Sink Vectorization:
                             className: VectorReduceSinkStringOperator
                             native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 100 Data size: 29638 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col1 (type: bigint)
             Execution mode: vectorized, llap
@@ -433,7 +433,7 @@ STAGE PLANS:
                     Reduce Sink Vectorization:
                         className: VectorReduceSinkObjectHashOperator
                         native: true
-                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                     Statistics: Num rows: 50 Data size: 14819 Basic stats: COMPLETE Column stats: NONE
                     value expressions: _col0 (type: bigint)
         Reducer 3 
@@ -630,7 +630,7 @@ STAGE PLANS:
                         Reduce Sink Vectorization:
                             className: VectorReduceSinkLongOperator
                             native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 100 Data size: 29638 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col1 (type: binary)
             Execution mode: vectorized, llap

http://git-wip-us.apache.org/repos/asf/hive/blob/ec8c390e/ql/src/test/results/clientpositive/llap/vector_bucket.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_bucket.q.out b/ql/src/test/results/clientpositive/llap/vector_bucket.q.out
index 7b57223..6dd0cfb 100644
--- a/ql/src/test/results/clientpositive/llap/vector_bucket.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_bucket.q.out
@@ -52,7 +52,7 @@ STAGE PLANS:
                       Reduce Sink Vectorization:
                           className: VectorReduceSinkObjectHashOperator
                           native: true
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                       Statistics: Num rows: 1 Data size: 26 Basic stats: COMPLETE Column stats: NONE
                       value expressions: _col0 (type: string), _col1 (type: string)
             Execution mode: vectorized, llap

http://git-wip-us.apache.org/repos/asf/hive/blob/ec8c390e/ql/src/test/results/clientpositive/llap/vector_char_2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_char_2.q.out b/ql/src/test/results/clientpositive/llap/vector_char_2.q.out
index c330097..65fafb0 100644
--- a/ql/src/test/results/clientpositive/llap/vector_char_2.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_char_2.q.out
@@ -111,10 +111,9 @@ STAGE PLANS:
                         sort order: +
                         Map-reduce partition columns: _col0 (type: char(20))
                         Reduce Sink Vectorization:
-                            className: VectorReduceSinkOperator
-                            native: false
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                            nativeConditionsNotMet: No TopN IS false
+                            className: VectorReduceSinkStringOperator
+                            native: true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 500 Data size: 99000 Basic stats: COMPLETE Column stats: NONE
                         TopN Hash Memory Usage: 0.1
                         value expressions: _col1 (type: bigint), _col2 (type: bigint)
@@ -155,10 +154,9 @@ STAGE PLANS:
                   key expressions: _col0 (type: char(20))
                   sort order: +
                   Reduce Sink Vectorization:
-                      className: VectorReduceSinkOperator
-                      native: false
-                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                      nativeConditionsNotMet: No TopN IS false
+                      className: VectorReduceSinkObjectHashOperator
+                      native: true
+                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                   Statistics: Num rows: 250 Data size: 49500 Basic stats: COMPLETE Column stats: NONE
                   TopN Hash Memory Usage: 0.1
                   value expressions: _col1 (type: bigint), _col2 (type: bigint)
@@ -309,10 +307,9 @@ STAGE PLANS:
                         sort order: -
                         Map-reduce partition columns: _col0 (type: char(20))
                         Reduce Sink Vectorization:
-                            className: VectorReduceSinkOperator
-                            native: false
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                            nativeConditionsNotMet: No TopN IS false
+                            className: VectorReduceSinkStringOperator
+                            native: true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 500 Data size: 99000 Basic stats: COMPLETE Column stats: NONE
                         TopN Hash Memory Usage: 0.1
                         value expressions: _col1 (type: bigint), _col2 (type: bigint)
@@ -353,10 +350,9 @@ STAGE PLANS:
                   key expressions: _col0 (type: char(20))
                   sort order: -
                   Reduce Sink Vectorization:
-                      className: VectorReduceSinkOperator
-                      native: false
-                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                      nativeConditionsNotMet: No TopN IS false
+                      className: VectorReduceSinkObjectHashOperator
+                      native: true
+                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                   Statistics: Num rows: 250 Data size: 49500 Basic stats: COMPLETE Column stats: NONE
                   TopN Hash Memory Usage: 0.1
                   value expressions: _col1 (type: bigint), _col2 (type: bigint)

http://git-wip-us.apache.org/repos/asf/hive/blob/ec8c390e/ql/src/test/results/clientpositive/llap/vector_char_mapjoin1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_char_mapjoin1.q.out b/ql/src/test/results/clientpositive/llap/vector_char_mapjoin1.q.out
index e0df3c0..ab67adc 100644
--- a/ql/src/test/results/clientpositive/llap/vector_char_mapjoin1.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_char_mapjoin1.q.out
@@ -188,7 +188,7 @@ STAGE PLANS:
                           Reduce Sink Vectorization:
                               className: VectorReduceSinkObjectHashOperator
                               native: true
-                              nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                              nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                           Statistics: Num rows: 3 Data size: 323 Basic stats: COMPLETE Column stats: NONE
                           value expressions: _col1 (type: char(10)), _col2 (type: int), _col3 (type: char(10))
             Execution mode: vectorized, llap
@@ -231,7 +231,7 @@ STAGE PLANS:
                         Reduce Sink Vectorization:
                             className: VectorReduceSinkStringOperator
                             native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 3 Data size: 294 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col0 (type: int)
             Execution mode: vectorized, llap
@@ -343,7 +343,7 @@ STAGE PLANS:
                         Reduce Sink Vectorization:
                             className: VectorReduceSinkStringOperator
                             native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 3 Data size: 294 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col0 (type: int)
             Execution mode: vectorized, llap
@@ -399,7 +399,7 @@ STAGE PLANS:
                           Reduce Sink Vectorization:
                               className: VectorReduceSinkObjectHashOperator
                               native: true
-                              nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                              nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                           Statistics: Num rows: 3 Data size: 323 Basic stats: COMPLETE Column stats: NONE
                           value expressions: _col1 (type: char(10)), _col2 (type: int), _col3 (type: char(20))
             Execution mode: vectorized, llap
@@ -527,7 +527,7 @@ STAGE PLANS:
                           Reduce Sink Vectorization:
                               className: VectorReduceSinkObjectHashOperator
                               native: true
-                              nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                              nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                           Statistics: Num rows: 3 Data size: 323 Basic stats: COMPLETE Column stats: NONE
                           value expressions: _col1 (type: char(10)), _col2 (type: int), _col3 (type: string)
             Execution mode: vectorized, llap
@@ -570,7 +570,7 @@ STAGE PLANS:
                         Reduce Sink Vectorization:
                             className: VectorReduceSinkStringOperator
                             native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 3 Data size: 273 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col0 (type: int)
             Execution mode: vectorized, llap

http://git-wip-us.apache.org/repos/asf/hive/blob/ec8c390e/ql/src/test/results/clientpositive/llap/vector_char_simple.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_char_simple.q.out b/ql/src/test/results/clientpositive/llap/vector_char_simple.q.out
index d3ab3f2..5e1dea8 100644
--- a/ql/src/test/results/clientpositive/llap/vector_char_simple.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_char_simple.q.out
@@ -77,7 +77,7 @@ STAGE PLANS:
                 enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
                 groupByVectorOutput: true
                 inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
+                allNative: true
                 usesVectorUDFAdaptor: false
                 vectorized: true
         Reducer 2 
@@ -163,7 +163,7 @@ STAGE PLANS:
                 enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
                 groupByVectorOutput: true
                 inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
+                allNative: true
                 usesVectorUDFAdaptor: false
                 vectorized: true
         Reducer 2 
@@ -251,10 +251,9 @@ STAGE PLANS:
                           className: VectorLimitOperator
                           native: true
                         Reduce Sink Vectorization:
-                            className: VectorReduceSinkOperator
-                            native: false
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                            nativeConditionsNotMet: No TopN IS false
+                            className: VectorReduceSinkObjectHashOperator
+                            native: true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
             Execution mode: vectorized, llap
             LLAP IO: all inputs
             Map Vectorization:
@@ -262,7 +261,7 @@ STAGE PLANS:
                 enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
                 groupByVectorOutput: true
                 inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
+                allNative: true
                 usesVectorUDFAdaptor: false
                 vectorized: true
         Reducer 2 

http://git-wip-us.apache.org/repos/asf/hive/blob/ec8c390e/ql/src/test/results/clientpositive/llap/vector_coalesce.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_coalesce.q.out b/ql/src/test/results/clientpositive/llap/vector_coalesce.q.out
index 578f849..358d8ae 100644
--- a/ql/src/test/results/clientpositive/llap/vector_coalesce.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_coalesce.q.out
@@ -39,10 +39,9 @@ STAGE PLANS:
                           projectedOutputColumns: [6, 2, 4, 1, 16]
                           selectExpressions: VectorCoalesce(columns [12, 6, 13, 14, 15])(children: ConstantVectorExpression(val null) -> 12:string, col 6, CastLongToString(col 2) -> 13:String, VectorUDFAdaptor(null(cfloat)) -> 14:string, CastLongToString(col 1) -> 15:String) -> 16:string
                         Reduce Sink Vectorization:
-                            className: VectorReduceSinkOperator
-                            native: false
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                            nativeConditionsNotMet: No TopN IS false
+                            className: VectorReduceSinkObjectHashOperator
+                            native: true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
             Execution mode: vectorized, llap
             LLAP IO: all inputs
             Map Vectorization:
@@ -50,7 +49,7 @@ STAGE PLANS:
                 enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
                 groupByVectorOutput: true
                 inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
+                allNative: true
                 usesVectorUDFAdaptor: true
                 vectorized: true
         Reducer 2 
@@ -145,10 +144,9 @@ STAGE PLANS:
                           projectedOutputColumns: [5, 2, 15]
                           selectExpressions: VectorCoalesce(columns [12, 14, 13])(children: ConstantVectorExpression(val null) -> 12:double, DoubleColAddDoubleColumn(col 5, col 13)(children: FuncLog2LongToDouble(col 2) -> 13:double) -> 14:double, ConstantVectorExpression(val 0.0) -> 13:double) -> 15:double
                         Reduce Sink Vectorization:
-                            className: VectorReduceSinkOperator
-                            native: false
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                            nativeConditionsNotMet: No TopN IS false
+                            className: VectorReduceSinkObjectHashOperator
+                            native: true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
             Execution mode: vectorized, llap
             LLAP IO: all inputs
             Map Vectorization:
@@ -156,7 +154,7 @@ STAGE PLANS:
                 enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
                 groupByVectorOutput: true
                 inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
+                allNative: true
                 usesVectorUDFAdaptor: false
                 vectorized: true
         Reducer 2 
@@ -250,10 +248,9 @@ STAGE PLANS:
                           native: true
                           projectedOutputColumns: []
                         Reduce Sink Vectorization:
-                            className: VectorReduceSinkOperator
-                            native: false
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                            nativeConditionsNotMet: No TopN IS false
+                            className: VectorReduceSinkObjectHashOperator
+                            native: true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
             Execution mode: vectorized, llap
             LLAP IO: all inputs
             Map Vectorization:
@@ -261,7 +258,7 @@ STAGE PLANS:
                 enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
                 groupByVectorOutput: true
                 inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
+                allNative: true
                 usesVectorUDFAdaptor: false
                 vectorized: true
         Reducer 2 
@@ -356,10 +353,9 @@ STAGE PLANS:
                           projectedOutputColumns: [8, 9, 12]
                           selectExpressions: VectorCoalesce(columns [8, 9])(children: col 8, col 9) -> 12:timestamp
                         Reduce Sink Vectorization:
-                            className: VectorReduceSinkOperator
-                            native: false
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                            nativeConditionsNotMet: No TopN IS false
+                            className: VectorReduceSinkObjectHashOperator
+                            native: true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
             Execution mode: vectorized, llap
             LLAP IO: all inputs
             Map Vectorization:
@@ -367,7 +363,7 @@ STAGE PLANS:
                 enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
                 groupByVectorOutput: true
                 inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
+                allNative: true
                 usesVectorUDFAdaptor: false
                 vectorized: true
         Reducer 2 
@@ -460,10 +456,9 @@ STAGE PLANS:
                           native: true
                           projectedOutputColumns: []
                         Reduce Sink Vectorization:
-                            className: VectorReduceSinkOperator
-                            native: false
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                            nativeConditionsNotMet: No TopN IS false
+                            className: VectorReduceSinkObjectHashOperator
+                            native: true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
             Execution mode: vectorized, llap
             LLAP IO: all inputs
             Map Vectorization:
@@ -471,7 +466,7 @@ STAGE PLANS:
                 enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
                 groupByVectorOutput: true
                 inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
+                allNative: true
                 usesVectorUDFAdaptor: false
                 vectorized: true
         Reducer 2 

http://git-wip-us.apache.org/repos/asf/hive/blob/ec8c390e/ql/src/test/results/clientpositive/llap/vector_coalesce_2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_coalesce_2.q.out b/ql/src/test/results/clientpositive/llap/vector_coalesce_2.q.out
index 2a3eff5..d57d39f 100644
--- a/ql/src/test/results/clientpositive/llap/vector_coalesce_2.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_coalesce_2.q.out
@@ -241,7 +241,7 @@ STAGE PLANS:
                         Reduce Sink Vectorization:
                             className: VectorReduceSinkStringOperator
                             native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 4 Data size: 510 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col1 (type: bigint)
             Execution mode: vectorized, llap


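All of the golden-file churn above traces to one planner change: the ReduceSink native precondition formerly reported as "No TopN" is now "No PTF TopN", so a plain TopN (LIMIT) no longer forces the row-mode VectorReduceSinkOperator. The affected plans switch to the specialized native classes (VectorReduceSinkLongOperator, VectorReduceSinkStringOperator, VectorReduceSinkObjectHashOperator), which is also why several Map Vectorization summaries flip from allNative: false to allNative: true. A minimal sketch of how such a plan fragment can be inspected, assuming a hypothetical ORC table src_orc(key string) and a build that includes this change:

-- Hypothetical table src_orc; the settings mirror the conditions listed in the plans above.
set hive.execution.engine=tez;
set hive.vectorized.execution.enabled=true;
set hive.vectorized.execution.reducesink.new.enabled=true;

explain vectorization detail
select key, count(*)
from src_orc
group by key
order by key
limit 10;

With the change applied, the Reduce Sink Vectorization section of this EXPLAIN output is expected to list "No PTF TopN IS true" among nativeConditionsMet instead of reporting "nativeConditionsNotMet: No TopN IS false", matching the before/after pairs in the diffs above.
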
[40/50] [abbrv] hive git commit: HIVE-14671 : merge master into hive-14535 (Wei Zheng)

Posted by we...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/1ceaf357/ql/src/test/queries/clientpositive/mm_all.q
----------------------------------------------------------------------
diff --cc ql/src/test/queries/clientpositive/mm_all.q
index a6a7c8f,0000000..e2c8e97
mode 100644,000000..100644
--- a/ql/src/test/queries/clientpositive/mm_all.q
+++ b/ql/src/test/queries/clientpositive/mm_all.q
@@@ -1,470 -1,0 +1,470 @@@
 +set hive.mapred.mode=nonstrict;
 +set hive.explain.user=false;
 +set hive.fetch.task.conversion=none;
 +set tez.grouping.min-size=1;
 +set tez.grouping.max-size=2;
 +set hive.exec.dynamic.partition.mode=nonstrict;
 +set hive.support.concurrency=true;
 +set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
 +
 +
 +-- Force multiple writers when reading
 +drop table intermediate;
 +create table intermediate(key int) partitioned by (p int) stored as orc;
 +insert into table intermediate partition(p='455') select distinct key from src where key >= 0 order by key desc limit 2;
 +insert into table intermediate partition(p='456') select distinct key from src where key is not null order by key asc limit 2;
 +insert into table intermediate partition(p='457') select distinct key from src where key >= 100 order by key asc limit 2;
 +
 +
 +drop table part_mm;
 +create table part_mm(key int) partitioned by (key_mm int) stored as orc tblproperties ("transactional"="true", "transactional_properties"="insert_only");
 +explain insert into table part_mm partition(key_mm=455) select key from intermediate;
 +insert into table part_mm partition(key_mm=455) select key from intermediate;
 +insert into table part_mm partition(key_mm=456) select key from intermediate;
 +insert into table part_mm partition(key_mm=455) select key from intermediate;
 +select * from part_mm order by key, key_mm;
 +
 +-- TODO: doesn't work truncate table part_mm partition(key_mm=455);
 +select * from part_mm order by key, key_mm;
 +truncate table part_mm;
 +select * from part_mm order by key, key_mm;
 +drop table part_mm;
 +
 +drop table simple_mm;
 +create table simple_mm(key int) stored as orc tblproperties ("transactional"="true", "transactional_properties"="insert_only");
 +insert into table simple_mm select key from intermediate;
 +insert overwrite table simple_mm select key from intermediate;
 +select * from simple_mm order by key;
 +insert into table simple_mm select key from intermediate;
 +select * from simple_mm order by key;
 +truncate table simple_mm;
 +select * from simple_mm;
 +drop table simple_mm;
 +
 +
 +-- simple DP (no bucketing)
 +drop table dp_mm;
 +
 +set hive.exec.dynamic.partition.mode=nonstrict;
 +
 +set hive.merge.mapredfiles=false;
 +set hive.merge.sparkfiles=false;
 +set hive.merge.tezfiles=false;
 +
 +create table dp_mm (key int) partitioned by (key1 string, key2 int) stored as orc
 +  tblproperties ("transactional"="true", "transactional_properties"="insert_only");
 +
 +insert into table dp_mm partition (key1='123', key2) select key, key from intermediate;
 +
 +select * from dp_mm order by key;
 +
 +drop table dp_mm;
 +
 +
 +-- union
 +
 +create table union_mm(id int)  tblproperties ("transactional"="true", "transactional_properties"="insert_only");
 +insert into table union_mm 
 +select temps.p from (
 +select key as p from intermediate 
 +union all 
 +select key + 1 as p from intermediate ) temps;
 +
 +select * from union_mm order by id;
 +
 +insert into table union_mm 
 +select p from
 +(
 +select key + 1 as p from intermediate
 +union all
 +select key from intermediate
 +) tab group by p
 +union all
 +select key + 2 as p from intermediate;
 +
 +select * from union_mm order by id;
 +
 +insert into table union_mm
 +SELECT p FROM
 +(
 +  SELECT key + 1 as p FROM intermediate
 +  UNION ALL
 +  SELECT key as p FROM ( 
 +    SELECT distinct key FROM (
 +      SELECT key FROM (
 +        SELECT key + 2 as key FROM intermediate
 +        UNION ALL
 +        SELECT key FROM intermediate
 +      )t1 
 +    group by key)t2
 +  )t3
 +)t4
 +group by p;
 +
 +
 +select * from union_mm order by id;
 +drop table union_mm;
 +
 +
 +create table partunion_mm(id int) partitioned by (key int) tblproperties ("transactional"="true", "transactional_properties"="insert_only");
 +insert into table partunion_mm partition(key)
 +select temps.* from (
 +select key as p, key from intermediate 
 +union all 
 +select key + 1 as p, key + 1 from intermediate ) temps;
 +
 +select * from partunion_mm order by id;
 +drop table partunion_mm;
 +
 +
 +
 +create table skew_mm(k1 int, k2 int, k4 int) skewed by (k1, k4) on ((0,0),(1,1),(2,2),(3,3))
 + stored as directories tblproperties ("transactional"="true", "transactional_properties"="insert_only");
 +
 +insert into table skew_mm 
 +select key, key, key from intermediate;
 +
 +select * from skew_mm order by k2, k1, k4;
 +drop table skew_mm;
 +
 +
 +create table skew_dp_union_mm(k1 int, k2 int, k4 int) partitioned by (k3 int) 
 +skewed by (k1, k4) on ((0,0),(1,1),(2,2),(3,3)) stored as directories tblproperties ("transactional"="true", "transactional_properties"="insert_only");
 +
 +insert into table skew_dp_union_mm partition (k3)
 +select key as i, key as j, key as k, key as l from intermediate
 +union all 
 +select key +1 as i, key +2 as j, key +3 as k, key +4 as l from intermediate;
 +
 +
 +select * from skew_dp_union_mm order by k2, k1, k4;
 +drop table skew_dp_union_mm;
 +
 +
 +
 +set hive.merge.orcfile.stripe.level=true;
 +set hive.merge.tezfiles=true;
 +set hive.merge.mapfiles=true;
 +set hive.merge.mapredfiles=true;
 +
 +
 +create table merge0_mm (id int) stored as orc tblproperties("transactional"="true", "transactional_properties"="insert_only");
 +
 +insert into table merge0_mm select key from intermediate;
 +select * from merge0_mm;
 +
 +set tez.grouping.split-count=1;
 +insert into table merge0_mm select key from intermediate;
 +set tez.grouping.split-count=0;
 +select * from merge0_mm;
 +
 +drop table merge0_mm;
 +
 +
 +create table merge2_mm (id int) tblproperties("transactional"="true", "transactional_properties"="insert_only");
 +
 +insert into table merge2_mm select key from intermediate;
 +select * from merge2_mm;
 +
 +set tez.grouping.split-count=1;
 +insert into table merge2_mm select key from intermediate;
 +set tez.grouping.split-count=0;
 +select * from merge2_mm;
 +
 +drop table merge2_mm;
 +
 +
 +create table merge1_mm (id int) partitioned by (key int) stored as orc tblproperties("transactional"="true", "transactional_properties"="insert_only");
 +
 +insert into table merge1_mm partition (key) select key, key from intermediate;
 +select * from merge1_mm order by id, key;
 +
 +set tez.grouping.split-count=1;
 +insert into table merge1_mm partition (key) select key, key from intermediate;
 +set tez.grouping.split-count=0;
 +select * from merge1_mm order by id, key;
 +
 +drop table merge1_mm;
 +
 +set hive.merge.tezfiles=false;
 +set hive.merge.mapfiles=false;
 +set hive.merge.mapredfiles=false;
 +
 +-- TODO: need to include merge+union+DP, but it's broken for now
 +
 +
 +drop table ctas0_mm;
 +create table ctas0_mm tblproperties ("transactional"="true", "transactional_properties"="insert_only") as select * from intermediate;
 +select * from ctas0_mm;
 +drop table ctas0_mm;
 +
 +drop table ctas1_mm;
 +create table ctas1_mm tblproperties ("transactional"="true", "transactional_properties"="insert_only") as
 +  select * from intermediate union all select * from intermediate;
 +select * from ctas1_mm;
 +drop table ctas1_mm;
 +
 +
 +
 +drop table iow0_mm;
 +create table iow0_mm(key int) tblproperties("transactional"="true", "transactional_properties"="insert_only");
 +insert overwrite table iow0_mm select key from intermediate;
 +insert into table iow0_mm select key + 1 from intermediate;
 +select * from iow0_mm order by key;
 +insert overwrite table iow0_mm select key + 2 from intermediate;
 +select * from iow0_mm order by key;
 +drop table iow0_mm;
 +
 +
 +drop table iow1_mm; 
 +create table iow1_mm(key int) partitioned by (key2 int)  tblproperties("transactional"="true", "transactional_properties"="insert_only");
 +insert overwrite table iow1_mm partition (key2)
 +select key as k1, key from intermediate union all select key as k1, key from intermediate;
 +insert into table iow1_mm partition (key2)
 +select key + 1 as k1, key from intermediate union all select key as k1, key from intermediate;
 +select * from iow1_mm order by key, key2;
 +insert overwrite table iow1_mm partition (key2)
 +select key + 3 as k1, key from intermediate union all select key + 4 as k1, key from intermediate;
 +select * from iow1_mm order by key, key2;
 +insert overwrite table iow1_mm partition (key2)
 +select key + 3 as k1, key + 3 from intermediate union all select key + 2 as k1, key + 2 from intermediate;
 +select * from iow1_mm order by key, key2;
 +drop table iow1_mm;
 +
 +
 +
 +
 +drop table load0_mm;
 +create table load0_mm (key string, value string) stored as textfile tblproperties("transactional"="true", "transactional_properties"="insert_only");
 +load data local inpath '../../data/files/kv1.txt' into table load0_mm;
 +select count(1) from load0_mm;
 +load data local inpath '../../data/files/kv2.txt' into table load0_mm;
 +select count(1) from load0_mm;
 +load data local inpath '../../data/files/kv2.txt' overwrite into table load0_mm;
 +select count(1) from load0_mm;
 +drop table load0_mm;
 +
 +
 +drop table intermediate2;
 +create table intermediate2 (key string, value string) stored as textfile
 +location 'file:${system:test.tmp.dir}/intermediate2';
 +load data local inpath '../../data/files/kv1.txt' into table intermediate2;
 +load data local inpath '../../data/files/kv2.txt' into table intermediate2;
 +load data local inpath '../../data/files/kv3.txt' into table intermediate2;
 +
 +drop table load1_mm;
 +create table load1_mm (key string, value string) stored as textfile tblproperties("transactional"="true", "transactional_properties"="insert_only");
 +load data inpath 'file:${system:test.tmp.dir}/intermediate2/kv2.txt' into table load1_mm;
 +load data inpath 'file:${system:test.tmp.dir}/intermediate2/kv1.txt' into table load1_mm;
 +select count(1) from load1_mm;
 +load data local inpath '../../data/files/kv1.txt' into table intermediate2;
 +load data local inpath '../../data/files/kv2.txt' into table intermediate2;
 +load data local inpath '../../data/files/kv3.txt' into table intermediate2;
 +load data inpath 'file:${system:test.tmp.dir}/intermediate2/kv*.txt' overwrite into table load1_mm;
 +select count(1) from load1_mm;
 +load data local inpath '../../data/files/kv2.txt' into table intermediate2;
 +load data inpath 'file:${system:test.tmp.dir}/intermediate2/kv2.txt' overwrite into table load1_mm;
 +select count(1) from load1_mm;
 +drop table load1_mm;
 +
 +drop table load2_mm;
 +create table load2_mm (key string, value string)
 +  partitioned by (k int, l int) stored as textfile tblproperties("transactional"="true", "transactional_properties"="insert_only");
 +load data local inpath '../../data/files/kv1.txt' into table intermediate2;
 +load data local inpath '../../data/files/kv2.txt' into table intermediate2;
 +load data local inpath '../../data/files/kv3.txt' into table intermediate2;
 +load data inpath 'file:${system:test.tmp.dir}/intermediate2/kv*.txt' into table load2_mm partition(k=5, l=5);
 +select count(1) from load2_mm;
 +drop table load2_mm;
 +drop table intermediate2;
 +
 +
 +drop table intermediate_nonpart;
 +drop table intermmediate_part;
 +drop table intermmediate_nonpart;
 +create table intermediate_nonpart(key int, p int);
 +insert into intermediate_nonpart select * from intermediate;
 +create table intermmediate_nonpart(key int, p int) tblproperties("transactional"="true", "transactional_properties"="insert_only");
 +insert into intermmediate_nonpart select * from intermediate;
 +create table intermmediate(key int) partitioned by (p int) tblproperties("transactional"="true", "transactional_properties"="insert_only");
 +insert into table intermmediate partition(p) select key, p from intermediate;
 +
 +set hive.exim.test.mode=true;
 +
 +export table intermediate_nonpart to 'ql/test/data/exports/intermediate_nonpart';
 +export table intermmediate_nonpart to 'ql/test/data/exports/intermmediate_nonpart';
 +export table intermediate to 'ql/test/data/exports/intermediate_part';
 +export table intermmediate to 'ql/test/data/exports/intermmediate_part';
 +
 +drop table intermediate_nonpart;
 +drop table intermmediate_part;
 +drop table intermmediate_nonpart;
 +
 +-- non-MM export to MM table, with and without partitions
 +
 +drop table import0_mm;
 +create table import0_mm(key int, p int) tblproperties("transactional"="true", "transactional_properties"="insert_only");
 +import table import0_mm from 'ql/test/data/exports/intermediate_nonpart';
 +select * from import0_mm order by key, p;
 +drop table import0_mm;
 +
 +
 +
 +drop table import1_mm;
 +create table import1_mm(key int) partitioned by (p int)
 +  stored as orc tblproperties("transactional"="true", "transactional_properties"="insert_only");
 +import table import1_mm from 'ql/test/data/exports/intermediate_part';
 +select * from import1_mm order by key, p;
 +drop table import1_mm;
 +
 +
 +-- MM export into new MM table, non-part and part
 +
- drop table import2_mm;
- import table import2_mm from 'ql/test/data/exports/intermmediate_nonpart';
- desc import2_mm;
- select * from import2_mm order by key, p;
- drop table import2_mm;
- 
- drop table import3_mm;
- import table import3_mm from 'ql/test/data/exports/intermmediate_part';
- desc import3_mm;
- select * from import3_mm order by key, p;
- drop table import3_mm;
++--drop table import2_mm;
++--import table import2_mm from 'ql/test/data/exports/intermmediate_nonpart';
++--desc import2_mm;
++--select * from import2_mm order by key, p;
++--drop table import2_mm;
++--
++--drop table import3_mm;
++--import table import3_mm from 'ql/test/data/exports/intermmediate_part';
++--desc import3_mm;
++--select * from import3_mm order by key, p;
++--drop table import3_mm;
 +
 +-- MM export into existing MM table, non-part and partial part
 +
 +drop table import4_mm;
 +create table import4_mm(key int, p int) tblproperties("transactional"="true", "transactional_properties"="insert_only");
 +import table import4_mm from 'ql/test/data/exports/intermmediate_nonpart';
 +select * from import4_mm order by key, p;
 +drop table import4_mm;
 +
 +drop table import5_mm;
 +create table import5_mm(key int) partitioned by (p int) tblproperties("transactional"="true", "transactional_properties"="insert_only");
 +import table import5_mm partition(p=455) from 'ql/test/data/exports/intermmediate_part';
 +select * from import5_mm order by key, p;
 +drop table import5_mm;
 +
 +-- MM export into existing non-MM table, non-part and part
 +
 +drop table import6_mm;
 +create table import6_mm(key int, p int);
 +import table import6_mm from 'ql/test/data/exports/intermmediate_nonpart';
 +select * from import6_mm order by key, p;
 +drop table import6_mm;
 +
 +drop table import7_mm;
 +create table import7_mm(key int) partitioned by (p int);
 +import table import7_mm from 'ql/test/data/exports/intermmediate_part';
 +select * from import7_mm order by key, p;
 +drop table import7_mm;
 +
 +set hive.exim.test.mode=false;
 +
 +
 +
 +drop table multi0_1_mm;
 +drop table multi0_2_mm;
 +create table multi0_1_mm (key int, key2 int)  tblproperties("transactional"="true", "transactional_properties"="insert_only");
 +create table multi0_2_mm (key int, key2 int)  tblproperties("transactional"="true", "transactional_properties"="insert_only");
 +
 +from intermediate
 +insert overwrite table multi0_1_mm select key, p
 +insert overwrite table multi0_2_mm select p, key;
 +
 +select * from multi0_1_mm order by key, key2;
 +select * from multi0_2_mm order by key, key2;
 +
 +set hive.merge.mapredfiles=true;
 +set hive.merge.sparkfiles=true;
 +set hive.merge.tezfiles=true;
 +
 +from intermediate
 +insert into table multi0_1_mm select p, key
 +insert overwrite table multi0_2_mm select key, p;
 +select * from multi0_1_mm order by key, key2;
 +select * from multi0_2_mm order by key, key2;
 +
 +set hive.merge.mapredfiles=false;
 +set hive.merge.sparkfiles=false;
 +set hive.merge.tezfiles=false;
 +
 +drop table multi0_1_mm;
 +drop table multi0_2_mm;
 +
 +
 +drop table multi1_mm;
 +create table multi1_mm (key int, key2 int) partitioned by (p int) tblproperties("transactional"="true", "transactional_properties"="insert_only");
 +from intermediate
 +insert into table multi1_mm partition(p=1) select p, key
 +insert into table multi1_mm partition(p=2) select key, p;
 +select * from multi1_mm order by key, key2, p;
 +from intermediate
 +insert into table multi1_mm partition(p=2) select p, key
 +insert overwrite table multi1_mm partition(p=1) select key, p;
 +select * from multi1_mm order by key, key2, p;
 +
 +from intermediate
 +insert into table multi1_mm partition(p) select p, key, p
 +insert into table multi1_mm partition(p=1) select key, p;
 +select key, key2, p from multi1_mm order by key, key2, p;
 +
 +from intermediate
 +insert into table multi1_mm partition(p) select p, key, 1
 +insert into table multi1_mm partition(p=1) select key, p;
 +select key, key2, p from multi1_mm order by key, key2, p;
 +drop table multi1_mm;
 +
 +
 +
 +
 +set datanucleus.cache.collections=false;
 +set hive.stats.autogather=true;
 +
 +drop table stats_mm;
 +create table stats_mm(key int)  tblproperties("transactional"="true", "transactional_properties"="insert_only");
 +insert overwrite table stats_mm  select key from intermediate;
 +desc formatted stats_mm;
 +
 +insert into table stats_mm  select key from intermediate;
 +desc formatted stats_mm;
 +drop table stats_mm;
 +
 +drop table stats2_mm;
 +create table stats2_mm tblproperties("transactional"="true", "transactional_properties"="insert_only") as select array(key, value) from src;
 +desc formatted stats2_mm;
 +drop table stats2_mm;
 +
 +
 +set hive.optimize.skewjoin=true;
 +set hive.skewjoin.key=2;
 +set hive.optimize.metadataonly=false;
 +
 +CREATE TABLE skewjoin_mm(key INT, value STRING) STORED AS TEXTFILE tblproperties ("transactional"="true", "transactional_properties"="insert_only");
 +FROM src src1 JOIN src src2 ON (src1.key = src2.key) INSERT OVERWRITE TABLE skewjoin_mm SELECT src1.key, src2.value;
 +select count(distinct key) from skewjoin_mm;
 +drop table skewjoin_mm;
 +
 +set hive.optimize.skewjoin=false;
 +
 +set hive.optimize.index.filter=true;
 +set hive.auto.convert.join=false;
 +CREATE TABLE parquet1_mm(id INT) STORED AS PARQUET tblproperties ("transactional"="true", "transactional_properties"="insert_only");
 +INSERT INTO parquet1_mm VALUES(1), (2);
 +CREATE TABLE parquet2_mm(id INT, value STRING) STORED AS PARQUET tblproperties ("transactional"="true", "transactional_properties"="insert_only");
 +INSERT INTO parquet2_mm VALUES(1, 'value1');
 +INSERT INTO parquet2_mm VALUES(1, 'value2');
 +select parquet1_mm.id, t1.value, t2.value FROM parquet1_mm
 +  JOIN parquet2_mm t1 ON parquet1_mm.id=t1.id
 +  JOIN parquet2_mm t2 ON parquet1_mm.id=t2.id
 +where t1.value = 'value1' and t2.value = 'value2';
 +drop table parquet1_mm;
 +drop table parquet2_mm;
 +
 +set hive.auto.convert.join=true;
 +
 +drop table intermediate;
 +
 +
 +
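
The merged mm_all.q above drives Hive's insert-only ("micromanaged", MM) transactional tables through plain inserts, dynamic partitioning, unions, multi-insert, load, export/import and stats collection. As a minimal, hedged sketch of the table property the whole test revolves around (the table name example_mm is illustrative only; the txn settings are the same ones the test sets at its top):

  set hive.support.concurrency=true;
  set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;

  -- insert-only ACID table: inserts land in per-transaction delta directories,
  -- but update/delete are not supported (unlike fully transactional tables)
  create table example_mm (key int) stored as orc
    tblproperties ("transactional"="true", "transactional_properties"="insert_only");

  insert into example_mm values (1), (2);
  select * from example_mm order by key;
  drop table example_mm;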


[24/50] [abbrv] hive git commit: HIVE-16275: Vectorization: Add ReduceSink support for TopN (in specialized native classes) (Matt McCline, reviewed by Gopal Vijayaraghavan)

Posted by we...@apache.org.
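
The diffs below record the expected-output changes from this commit: ORDER BY ... LIMIT plans that previously fell back to the row-mode VectorReduceSinkOperator ("nativeConditionsNotMet: No TopN IS false") now use the native VectorReduceSinkObjectHashOperator, and the native-condition string is renamed from "No TopN" to "No PTF TopN". A hedged sketch of a query shape that exercises the TopN ReduceSink (the over1korc table name and its tinyint/smallint/int columns are assumptions; any ORC-backed table works):

  set hive.vectorized.execution.enabled=true;
  set hive.vectorized.execution.reducesink.new.enabled=true;

  -- ORDER BY with LIMIT pushes a TopN into the ReduceSink; with HIVE-16275 this
  -- sink can still be planned as a specialized native vectorized operator.
  explain vectorization detail
  select t, si, i from over1korc order by t, si, i limit 20;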
http://git-wip-us.apache.org/repos/asf/hive/blob/ec8c390e/ql/src/test/results/clientpositive/llap/vectorized_timestamp_funcs.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vectorized_timestamp_funcs.q.out b/ql/src/test/results/clientpositive/llap/vectorized_timestamp_funcs.q.out
index ed509de..5c36a4a 100644
--- a/ql/src/test/results/clientpositive/llap/vectorized_timestamp_funcs.q.out
+++ b/ql/src/test/results/clientpositive/llap/vectorized_timestamp_funcs.q.out
@@ -128,7 +128,7 @@ STAGE PLANS:
                       Reduce Sink Vectorization:
                           className: VectorReduceSinkObjectHashOperator
                           native: true
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                       Statistics: Num rows: 40 Data size: 84 Basic stats: COMPLETE Column stats: NONE
                       value expressions: _col1 (type: int), _col2 (type: int), _col3 (type: int), _col4 (type: int), _col5 (type: int), _col6 (type: int), _col7 (type: int), _col8 (type: int)
             Execution mode: vectorized, llap
@@ -311,7 +311,7 @@ STAGE PLANS:
                       Reduce Sink Vectorization:
                           className: VectorReduceSinkObjectHashOperator
                           native: true
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                       Statistics: Num rows: 40 Data size: 84 Basic stats: COMPLETE Column stats: NONE
                       value expressions: _col1 (type: int), _col2 (type: int), _col3 (type: int), _col4 (type: int), _col5 (type: int), _col6 (type: int), _col7 (type: int), _col8 (type: int)
             Execution mode: vectorized, llap
@@ -494,7 +494,7 @@ STAGE PLANS:
                       Reduce Sink Vectorization:
                           className: VectorReduceSinkObjectHashOperator
                           native: true
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                       Statistics: Num rows: 40 Data size: 84 Basic stats: COMPLETE Column stats: NONE
                       value expressions: _col1 (type: boolean), _col2 (type: boolean), _col3 (type: boolean), _col4 (type: boolean), _col5 (type: boolean), _col6 (type: boolean), _col7 (type: boolean), _col8 (type: boolean)
             Execution mode: vectorized, llap
@@ -677,7 +677,7 @@ STAGE PLANS:
                       Reduce Sink Vectorization:
                           className: VectorReduceSinkObjectHashOperator
                           native: true
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                       Statistics: Num rows: 3 Data size: 294 Basic stats: COMPLETE Column stats: NONE
                       value expressions: _col1 (type: int), _col2 (type: int), _col3 (type: int), _col4 (type: int), _col5 (type: int), _col6 (type: int), _col7 (type: int), _col8 (type: int)
             Execution mode: vectorized, llap
@@ -820,7 +820,7 @@ STAGE PLANS:
                         Reduce Sink Vectorization:
                             className: VectorReduceSinkObjectHashOperator
                             native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 1 Data size: 96 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col0 (type: timestamp), _col1 (type: timestamp), _col2 (type: bigint), _col3 (type: bigint)
             Execution mode: vectorized, llap

http://git-wip-us.apache.org/repos/asf/hive/blob/ec8c390e/ql/src/test/results/clientpositive/spark/vector_between_in.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/vector_between_in.q.out b/ql/src/test/results/clientpositive/spark/vector_between_in.q.out
index cd9dfc7..fa4b51f 100644
--- a/ql/src/test/results/clientpositive/spark/vector_between_in.q.out
+++ b/ql/src/test/results/clientpositive/spark/vector_between_in.q.out
@@ -60,7 +60,7 @@ STAGE PLANS:
                         Reduce Sink Vectorization:
                             className: VectorReduceSinkObjectHashOperator
                             native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 6144 Data size: 1233808 Basic stats: COMPLETE Column stats: NONE
             Execution mode: vectorized
             Map Vectorization:
@@ -162,7 +162,7 @@ STAGE PLANS:
                           Reduce Sink Vectorization:
                               className: VectorReduceSinkObjectHashOperator
                               native: true
-                              nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                              nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                           Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                           value expressions: _col0 (type: bigint)
             Execution mode: vectorized
@@ -260,7 +260,7 @@ STAGE PLANS:
                         Reduce Sink Vectorization:
                             className: VectorReduceSinkObjectHashOperator
                             native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 6144 Data size: 1233808 Basic stats: COMPLETE Column stats: NONE
             Execution mode: vectorized
             Map Vectorization:
@@ -362,7 +362,7 @@ STAGE PLANS:
                           Reduce Sink Vectorization:
                               className: VectorReduceSinkObjectHashOperator
                               native: true
-                              nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                              nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                           Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                           value expressions: _col0 (type: bigint)
             Execution mode: vectorized
@@ -460,7 +460,7 @@ STAGE PLANS:
                         Reduce Sink Vectorization:
                             className: VectorReduceSinkObjectHashOperator
                             native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 1365 Data size: 274112 Basic stats: COMPLETE Column stats: NONE
             Execution mode: vectorized
             Map Vectorization:
@@ -554,7 +554,7 @@ STAGE PLANS:
                         Reduce Sink Vectorization:
                             className: VectorReduceSinkObjectHashOperator
                             native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 10923 Data size: 2193503 Basic stats: COMPLETE Column stats: NONE
             Execution mode: vectorized
             Map Vectorization:
@@ -648,7 +648,7 @@ STAGE PLANS:
                         Reduce Sink Vectorization:
                             className: VectorReduceSinkObjectHashOperator
                             native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 1365 Data size: 274112 Basic stats: COMPLETE Column stats: NONE
             Execution mode: vectorized
             Map Vectorization:
@@ -750,7 +750,7 @@ STAGE PLANS:
                           Reduce Sink Vectorization:
                               className: VectorReduceSinkObjectHashOperator
                               native: true
-                              nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                              nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                           Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                           value expressions: _col0 (type: bigint)
             Execution mode: vectorized
@@ -1102,7 +1102,7 @@ STAGE PLANS:
                         Reduce Sink Vectorization:
                             className: VectorReduceSinkObjectHashOperator
                             native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 12288 Data size: 2467616 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col1 (type: bigint)
             Execution mode: vectorized
@@ -1143,7 +1143,7 @@ STAGE PLANS:
                   Reduce Sink Vectorization:
                       className: VectorReduceSinkObjectHashOperator
                       native: true
-                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                   Statistics: Num rows: 6144 Data size: 1233808 Basic stats: COMPLETE Column stats: NONE
                   value expressions: _col1 (type: bigint)
         Reducer 3 
@@ -1238,7 +1238,7 @@ STAGE PLANS:
                         Reduce Sink Vectorization:
                             className: VectorReduceSinkObjectHashOperator
                             native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 12288 Data size: 2467616 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col1 (type: bigint)
             Execution mode: vectorized
@@ -1279,7 +1279,7 @@ STAGE PLANS:
                   Reduce Sink Vectorization:
                       className: VectorReduceSinkObjectHashOperator
                       native: true
-                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                   Statistics: Num rows: 6144 Data size: 1233808 Basic stats: COMPLETE Column stats: NONE
                   value expressions: _col1 (type: bigint)
         Reducer 3 
@@ -1374,7 +1374,7 @@ STAGE PLANS:
                         Reduce Sink Vectorization:
                             className: VectorReduceSinkObjectHashOperator
                             native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 12288 Data size: 2467616 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col1 (type: bigint)
             Execution mode: vectorized
@@ -1415,7 +1415,7 @@ STAGE PLANS:
                   Reduce Sink Vectorization:
                       className: VectorReduceSinkObjectHashOperator
                       native: true
-                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                   Statistics: Num rows: 6144 Data size: 1233808 Basic stats: COMPLETE Column stats: NONE
                   value expressions: _col1 (type: bigint)
         Reducer 3 
@@ -1510,7 +1510,7 @@ STAGE PLANS:
                         Reduce Sink Vectorization:
                             className: VectorReduceSinkObjectHashOperator
                             native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 12288 Data size: 2467616 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col1 (type: bigint)
             Execution mode: vectorized
@@ -1551,7 +1551,7 @@ STAGE PLANS:
                   Reduce Sink Vectorization:
                       className: VectorReduceSinkObjectHashOperator
                       native: true
-                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                   Statistics: Num rows: 6144 Data size: 1233808 Basic stats: COMPLETE Column stats: NONE
                   value expressions: _col1 (type: bigint)
         Reducer 3 

http://git-wip-us.apache.org/repos/asf/hive/blob/ec8c390e/ql/src/test/results/clientpositive/spark/vector_count_distinct.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/vector_count_distinct.q.out b/ql/src/test/results/clientpositive/spark/vector_count_distinct.q.out
index 4454445..ae46718 100644
--- a/ql/src/test/results/clientpositive/spark/vector_count_distinct.q.out
+++ b/ql/src/test/results/clientpositive/spark/vector_count_distinct.q.out
@@ -1281,7 +1281,7 @@ STAGE PLANS:
                         Reduce Sink Vectorization:
                             className: VectorReduceSinkObjectHashOperator
                             native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 2000 Data size: 3504000 Basic stats: COMPLETE Column stats: NONE
             Execution mode: vectorized
             Map Vectorization:
@@ -1329,7 +1329,7 @@ STAGE PLANS:
                     Reduce Sink Vectorization:
                         className: VectorReduceSinkObjectHashOperator
                         native: true
-                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                     Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                     value expressions: _col0 (type: bigint)
         Reducer 3 

http://git-wip-us.apache.org/repos/asf/hive/blob/ec8c390e/ql/src/test/results/clientpositive/spark/vector_data_types.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/vector_data_types.q.out b/ql/src/test/results/clientpositive/spark/vector_data_types.q.out
index 57c1963..3244e47 100644
--- a/ql/src/test/results/clientpositive/spark/vector_data_types.q.out
+++ b/ql/src/test/results/clientpositive/spark/vector_data_types.q.out
@@ -230,10 +230,9 @@ STAGE PLANS:
                       key expressions: _col0 (type: tinyint), _col1 (type: smallint), _col2 (type: int)
                       sort order: +++
                       Reduce Sink Vectorization:
-                          className: VectorReduceSinkOperator
-                          native: false
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                          nativeConditionsNotMet: No TopN IS false
+                          className: VectorReduceSinkObjectHashOperator
+                          native: true
+                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                       Statistics: Num rows: 1049 Data size: 311170 Basic stats: COMPLETE Column stats: NONE
                       TopN Hash Memory Usage: 0.1
                       value expressions: _col3 (type: bigint), _col4 (type: float), _col5 (type: double), _col6 (type: boolean), _col7 (type: string), _col8 (type: timestamp), _col9 (type: decimal(4,2)), _col10 (type: binary)
@@ -243,7 +242,7 @@ STAGE PLANS:
                 enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
                 groupByVectorOutput: true
                 inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
+                allNative: true
                 usesVectorUDFAdaptor: false
                 vectorized: true
         Reducer 2 

http://git-wip-us.apache.org/repos/asf/hive/blob/ec8c390e/ql/src/test/results/clientpositive/spark/vector_decimal_aggregate.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/vector_decimal_aggregate.q.out b/ql/src/test/results/clientpositive/spark/vector_decimal_aggregate.q.out
index e07cf83..edda919 100644
--- a/ql/src/test/results/clientpositive/spark/vector_decimal_aggregate.q.out
+++ b/ql/src/test/results/clientpositive/spark/vector_decimal_aggregate.q.out
@@ -85,7 +85,7 @@ STAGE PLANS:
                         Reduce Sink Vectorization:
                             className: VectorReduceSinkObjectHashOperator
                             native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 12288 Data size: 2165060 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col1 (type: bigint), _col2 (type: decimal(20,10)), _col3 (type: decimal(20,10)), _col4 (type: decimal(30,10)), _col5 (type: bigint), _col6 (type: decimal(23,14)), _col7 (type: decimal(23,14)), _col8 (type: decimal(33,14)), _col9 (type: bigint)
             Execution mode: vectorized

http://git-wip-us.apache.org/repos/asf/hive/blob/ec8c390e/ql/src/test/results/clientpositive/spark/vector_distinct_2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/vector_distinct_2.q.out b/ql/src/test/results/clientpositive/spark/vector_distinct_2.q.out
index b7b0603..59dcf7c 100644
--- a/ql/src/test/results/clientpositive/spark/vector_distinct_2.q.out
+++ b/ql/src/test/results/clientpositive/spark/vector_distinct_2.q.out
@@ -156,7 +156,7 @@ STAGE PLANS:
                         Reduce Sink Vectorization:
                             className: VectorReduceSinkObjectHashOperator
                             native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 2000 Data size: 918712 Basic stats: COMPLETE Column stats: NONE
             Execution mode: vectorized
             Map Vectorization:

http://git-wip-us.apache.org/repos/asf/hive/blob/ec8c390e/ql/src/test/results/clientpositive/spark/vector_groupby_3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/vector_groupby_3.q.out b/ql/src/test/results/clientpositive/spark/vector_groupby_3.q.out
index e4e9761..94b3ef6 100644
--- a/ql/src/test/results/clientpositive/spark/vector_groupby_3.q.out
+++ b/ql/src/test/results/clientpositive/spark/vector_groupby_3.q.out
@@ -158,7 +158,7 @@ STAGE PLANS:
                         Reduce Sink Vectorization:
                             className: VectorReduceSinkObjectHashOperator
                             native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 2000 Data size: 918712 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col2 (type: bigint)
             Execution mode: vectorized

http://git-wip-us.apache.org/repos/asf/hive/blob/ec8c390e/ql/src/test/results/clientpositive/spark/vector_mapjoin_reduce.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/vector_mapjoin_reduce.q.out b/ql/src/test/results/clientpositive/spark/vector_mapjoin_reduce.q.out
index dc80037..2f2609f 100644
--- a/ql/src/test/results/clientpositive/spark/vector_mapjoin_reduce.q.out
+++ b/ql/src/test/results/clientpositive/spark/vector_mapjoin_reduce.q.out
@@ -157,7 +157,7 @@ STAGE PLANS:
                         Reduce Sink Vectorization:
                             className: VectorReduceSinkObjectHashOperator
                             native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 100 Data size: 11999 Basic stats: COMPLETE Column stats: NONE
             Execution mode: vectorized
             Map Vectorization:
@@ -428,7 +428,7 @@ STAGE PLANS:
                         Reduce Sink Vectorization:
                             className: VectorReduceSinkObjectHashOperator
                             native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 100 Data size: 11999 Basic stats: COMPLETE Column stats: NONE
             Execution mode: vectorized
             Map Vectorization:

http://git-wip-us.apache.org/repos/asf/hive/blob/ec8c390e/ql/src/test/results/clientpositive/spark/vector_orderby_5.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/vector_orderby_5.q.out b/ql/src/test/results/clientpositive/spark/vector_orderby_5.q.out
index e9b9fca..fd3469c 100644
--- a/ql/src/test/results/clientpositive/spark/vector_orderby_5.q.out
+++ b/ql/src/test/results/clientpositive/spark/vector_orderby_5.q.out
@@ -159,7 +159,7 @@ STAGE PLANS:
                         Reduce Sink Vectorization:
                             className: VectorReduceSinkObjectHashOperator
                             native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 2000 Data size: 918712 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col1 (type: bigint)
             Execution mode: vectorized
@@ -200,7 +200,7 @@ STAGE PLANS:
                   Reduce Sink Vectorization:
                       className: VectorReduceSinkObjectHashOperator
                       native: true
-                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                   Statistics: Num rows: 1000 Data size: 459356 Basic stats: COMPLETE Column stats: NONE
                   value expressions: _col1 (type: bigint)
         Reducer 3 

http://git-wip-us.apache.org/repos/asf/hive/blob/ec8c390e/ql/src/test/results/clientpositive/spark/vector_outer_join1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/vector_outer_join1.q.out b/ql/src/test/results/clientpositive/spark/vector_outer_join1.q.out
index 098b898..b9ec98a 100644
--- a/ql/src/test/results/clientpositive/spark/vector_outer_join1.q.out
+++ b/ql/src/test/results/clientpositive/spark/vector_outer_join1.q.out
@@ -828,7 +828,7 @@ STAGE PLANS:
                             Reduce Sink Vectorization:
                                 className: VectorReduceSinkObjectHashOperator
                                 native: true
-                                nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                                nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                             Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
                             value expressions: _col0 (type: bigint), _col1 (type: bigint)
             Execution mode: vectorized

http://git-wip-us.apache.org/repos/asf/hive/blob/ec8c390e/ql/src/test/results/clientpositive/spark/vector_outer_join2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/vector_outer_join2.q.out b/ql/src/test/results/clientpositive/spark/vector_outer_join2.q.out
index f238b0a..9e17983 100644
--- a/ql/src/test/results/clientpositive/spark/vector_outer_join2.q.out
+++ b/ql/src/test/results/clientpositive/spark/vector_outer_join2.q.out
@@ -409,7 +409,7 @@ STAGE PLANS:
                             Reduce Sink Vectorization:
                                 className: VectorReduceSinkObjectHashOperator
                                 native: true
-                                nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                                nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                             Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
                             value expressions: _col0 (type: bigint), _col1 (type: bigint)
             Execution mode: vectorized

http://git-wip-us.apache.org/repos/asf/hive/blob/ec8c390e/ql/src/test/results/clientpositive/spark/vector_string_concat.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/vector_string_concat.q.out b/ql/src/test/results/clientpositive/spark/vector_string_concat.q.out
index f7fe637..b361ec0 100644
--- a/ql/src/test/results/clientpositive/spark/vector_string_concat.q.out
+++ b/ql/src/test/results/clientpositive/spark/vector_string_concat.q.out
@@ -367,10 +367,9 @@ STAGE PLANS:
                         sort order: +
                         Map-reduce partition columns: _col0 (type: string)
                         Reduce Sink Vectorization:
-                            className: VectorReduceSinkOperator
-                            native: false
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                            nativeConditionsNotMet: No TopN IS false
+                            className: VectorReduceSinkObjectHashOperator
+                            native: true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 2000 Data size: 918712 Basic stats: COMPLETE Column stats: NONE
                         TopN Hash Memory Usage: 0.1
             Execution mode: vectorized
@@ -407,10 +406,9 @@ STAGE PLANS:
                   key expressions: _col0 (type: string)
                   sort order: +
                   Reduce Sink Vectorization:
-                      className: VectorReduceSinkOperator
-                      native: false
-                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                      nativeConditionsNotMet: No TopN IS false
+                      className: VectorReduceSinkObjectHashOperator
+                      native: true
+                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                   Statistics: Num rows: 1000 Data size: 459356 Basic stats: COMPLETE Column stats: NONE
                   TopN Hash Memory Usage: 0.1
         Reducer 3 

http://git-wip-us.apache.org/repos/asf/hive/blob/ec8c390e/ql/src/test/results/clientpositive/spark/vectorization_0.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/vectorization_0.q.out b/ql/src/test/results/clientpositive/spark/vectorization_0.q.out
index eda5612..ae7e0c3 100644
--- a/ql/src/test/results/clientpositive/spark/vectorization_0.q.out
+++ b/ql/src/test/results/clientpositive/spark/vectorization_0.q.out
@@ -62,7 +62,7 @@ STAGE PLANS:
                         Reduce Sink Vectorization:
                             className: VectorReduceSinkObjectHashOperator
                             native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 1 Data size: 24 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col0 (type: tinyint), _col1 (type: tinyint), _col2 (type: bigint), _col3 (type: bigint)
             Execution mode: vectorized
@@ -101,7 +101,7 @@ STAGE PLANS:
                   Reduce Sink Vectorization:
                       className: VectorReduceSinkObjectHashOperator
                       native: true
-                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                   Statistics: Num rows: 1 Data size: 24 Basic stats: COMPLETE Column stats: NONE
                   value expressions: _col1 (type: tinyint), _col2 (type: bigint), _col3 (type: bigint)
         Reducer 3 
@@ -216,7 +216,7 @@ STAGE PLANS:
                         Reduce Sink Vectorization:
                             className: VectorReduceSinkObjectHashOperator
                             native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col0 (type: bigint)
             Execution mode: vectorized
@@ -255,7 +255,7 @@ STAGE PLANS:
                   Reduce Sink Vectorization:
                       className: VectorReduceSinkObjectHashOperator
                       native: true
-                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                   Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
         Reducer 3 
             Execution mode: vectorized
@@ -512,7 +512,7 @@ STAGE PLANS:
                         Reduce Sink Vectorization:
                             className: VectorReduceSinkObjectHashOperator
                             native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 1 Data size: 32 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col0 (type: bigint), _col1 (type: bigint), _col2 (type: bigint), _col3 (type: bigint)
             Execution mode: vectorized
@@ -551,7 +551,7 @@ STAGE PLANS:
                   Reduce Sink Vectorization:
                       className: VectorReduceSinkObjectHashOperator
                       native: true
-                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                   Statistics: Num rows: 1 Data size: 32 Basic stats: COMPLETE Column stats: NONE
                   value expressions: _col1 (type: bigint), _col2 (type: bigint), _col3 (type: bigint)
         Reducer 3 
@@ -666,7 +666,7 @@ STAGE PLANS:
                         Reduce Sink Vectorization:
                             className: VectorReduceSinkObjectHashOperator
                             native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col0 (type: bigint)
             Execution mode: vectorized
@@ -705,7 +705,7 @@ STAGE PLANS:
                   Reduce Sink Vectorization:
                       className: VectorReduceSinkObjectHashOperator
                       native: true
-                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                   Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
         Reducer 3 
             Execution mode: vectorized
@@ -962,7 +962,7 @@ STAGE PLANS:
                         Reduce Sink Vectorization:
                             className: VectorReduceSinkObjectHashOperator
                             native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 1 Data size: 24 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col0 (type: float), _col1 (type: float), _col2 (type: bigint), _col3 (type: bigint)
             Execution mode: vectorized
@@ -1001,7 +1001,7 @@ STAGE PLANS:
                   Reduce Sink Vectorization:
                       className: VectorReduceSinkObjectHashOperator
                       native: true
-                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                   Statistics: Num rows: 1 Data size: 24 Basic stats: COMPLETE Column stats: NONE
                   value expressions: _col1 (type: float), _col2 (type: bigint), _col3 (type: bigint)
         Reducer 3 
@@ -1116,7 +1116,7 @@ STAGE PLANS:
                         Reduce Sink Vectorization:
                             className: VectorReduceSinkObjectHashOperator
                             native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col0 (type: double)
             Execution mode: vectorized
@@ -1155,7 +1155,7 @@ STAGE PLANS:
                   Reduce Sink Vectorization:
                       className: VectorReduceSinkObjectHashOperator
                       native: true
-                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                   Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
         Reducer 3 
             Execution mode: vectorized

http://git-wip-us.apache.org/repos/asf/hive/blob/ec8c390e/ql/src/test/results/clientpositive/spark/vectorization_7.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/vectorization_7.q.out b/ql/src/test/results/clientpositive/spark/vectorization_7.q.out
index 9783907..d2ff353 100644
--- a/ql/src/test/results/clientpositive/spark/vectorization_7.q.out
+++ b/ql/src/test/results/clientpositive/spark/vectorization_7.q.out
@@ -95,10 +95,9 @@ STAGE PLANS:
                         key expressions: _col0 (type: boolean), _col1 (type: bigint), _col2 (type: smallint), _col3 (type: tinyint), _col4 (type: timestamp), _col5 (type: string), _col6 (type: bigint), _col7 (type: int), _col8 (type: smallint), _col9 (type: tinyint), _col10 (type: int), _col11 (type: bigint), _col12 (type: int), _col13 (type: tinyint), _col14 (type: tinyint)
                         sort order: +++++++++++++++
                         Reduce Sink Vectorization:
-                            className: VectorReduceSinkOperator
-                            native: false
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                            nativeConditionsNotMet: No TopN IS false, Uniform Hash IS false
+                            className: VectorReduceSinkObjectHashOperator
+                            native: true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 7281 Data size: 223523 Basic stats: COMPLETE Column stats: NONE
                         TopN Hash Memory Usage: 0.1
             Execution mode: vectorized
@@ -107,7 +106,7 @@ STAGE PLANS:
                 enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
                 groupByVectorOutput: true
                 inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
+                allNative: true
                 usesVectorUDFAdaptor: false
                 vectorized: true
         Reducer 2 
@@ -329,10 +328,9 @@ STAGE PLANS:
                         key expressions: _col0 (type: boolean), _col1 (type: bigint), _col2 (type: smallint), _col3 (type: tinyint), _col4 (type: timestamp), _col5 (type: string), _col6 (type: bigint), _col7 (type: int), _col8 (type: smallint), _col9 (type: tinyint), _col10 (type: int), _col11 (type: bigint), _col12 (type: int), _col13 (type: tinyint), _col14 (type: tinyint)
                         sort order: +++++++++++++++
                         Reduce Sink Vectorization:
-                            className: VectorReduceSinkOperator
-                            native: false
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                            nativeConditionsNotMet: No TopN IS false, Uniform Hash IS false
+                            className: VectorReduceSinkObjectHashOperator
+                            native: true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 7281 Data size: 223523 Basic stats: COMPLETE Column stats: NONE
                         TopN Hash Memory Usage: 0.1
             Execution mode: vectorized
@@ -341,7 +339,7 @@ STAGE PLANS:
                 enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
                 groupByVectorOutput: true
                 inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
+                allNative: true
                 usesVectorUDFAdaptor: false
                 vectorized: true
         Reducer 2 

http://git-wip-us.apache.org/repos/asf/hive/blob/ec8c390e/ql/src/test/results/clientpositive/spark/vectorization_8.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/vectorization_8.q.out b/ql/src/test/results/clientpositive/spark/vectorization_8.q.out
index b5c056f..927ee59 100644
--- a/ql/src/test/results/clientpositive/spark/vectorization_8.q.out
+++ b/ql/src/test/results/clientpositive/spark/vectorization_8.q.out
@@ -91,10 +91,9 @@ STAGE PLANS:
                         key expressions: _col0 (type: timestamp), _col1 (type: double), _col2 (type: boolean), _col3 (type: string), _col4 (type: float), _col5 (type: double), _col6 (type: double), _col7 (type: double), _col8 (type: float), _col9 (type: double), _col10 (type: double), _col11 (type: float), _col12 (type: float), _col13 (type: double)
                         sort order: ++++++++++++++
                         Reduce Sink Vectorization:
-                            className: VectorReduceSinkOperator
-                            native: false
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                            nativeConditionsNotMet: No TopN IS false, Uniform Hash IS false
+                            className: VectorReduceSinkObjectHashOperator
+                            native: true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE
                         TopN Hash Memory Usage: 0.1
             Execution mode: vectorized
@@ -103,7 +102,7 @@ STAGE PLANS:
                 enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
                 groupByVectorOutput: true
                 inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
+                allNative: true
                 usesVectorUDFAdaptor: false
                 vectorized: true
         Reducer 2 
@@ -312,10 +311,9 @@ STAGE PLANS:
                         key expressions: _col0 (type: timestamp), _col1 (type: double), _col2 (type: boolean), _col3 (type: string), _col4 (type: float), _col5 (type: double), _col6 (type: double), _col7 (type: double), _col8 (type: float), _col9 (type: double), _col10 (type: double), _col11 (type: float), _col12 (type: float), _col13 (type: double)
                         sort order: ++++++++++++++
                         Reduce Sink Vectorization:
-                            className: VectorReduceSinkOperator
-                            native: false
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                            nativeConditionsNotMet: No TopN IS false, Uniform Hash IS false
+                            className: VectorReduceSinkObjectHashOperator
+                            native: true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE
                         TopN Hash Memory Usage: 0.1
             Execution mode: vectorized
@@ -324,7 +322,7 @@ STAGE PLANS:
                 enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
                 groupByVectorOutput: true
                 inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
+                allNative: true
                 usesVectorUDFAdaptor: false
                 vectorized: true
         Reducer 2 

http://git-wip-us.apache.org/repos/asf/hive/blob/ec8c390e/ql/src/test/results/clientpositive/spark/vectorization_div0.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/vectorization_div0.q.out b/ql/src/test/results/clientpositive/spark/vectorization_div0.q.out
index 81979d6..788c2ee 100644
--- a/ql/src/test/results/clientpositive/spark/vectorization_div0.q.out
+++ b/ql/src/test/results/clientpositive/spark/vectorization_div0.q.out
@@ -225,10 +225,9 @@ STAGE PLANS:
                         key expressions: _col0 (type: bigint), _col1 (type: double)
                         sort order: ++
                         Reduce Sink Vectorization:
-                            className: VectorReduceSinkOperator
-                            native: false
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                            nativeConditionsNotMet: No TopN IS false
+                            className: VectorReduceSinkObjectHashOperator
+                            native: true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 1365 Data size: 41904 Basic stats: COMPLETE Column stats: NONE
                         TopN Hash Memory Usage: 0.1
                         value expressions: _col2 (type: decimal(22,21))
@@ -238,7 +237,7 @@ STAGE PLANS:
                 enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
                 groupByVectorOutput: true
                 inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
+                allNative: true
                 usesVectorUDFAdaptor: false
                 vectorized: true
         Reducer 2 
@@ -443,10 +442,9 @@ STAGE PLANS:
                         key expressions: _col0 (type: double), _col1 (type: double)
                         sort order: ++
                         Reduce Sink Vectorization:
-                            className: VectorReduceSinkOperator
-                            native: false
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                            nativeConditionsNotMet: No TopN IS false
+                            className: VectorReduceSinkObjectHashOperator
+                            native: true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 1365 Data size: 41904 Basic stats: COMPLETE Column stats: NONE
                         TopN Hash Memory Usage: 0.1
                         value expressions: _col2 (type: double), _col4 (type: double), _col5 (type: double)
@@ -456,7 +454,7 @@ STAGE PLANS:
                 enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
                 groupByVectorOutput: true
                 inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
+                allNative: true
                 usesVectorUDFAdaptor: false
                 vectorized: true
         Reducer 2 

http://git-wip-us.apache.org/repos/asf/hive/blob/ec8c390e/ql/src/test/results/clientpositive/spark/vectorization_part_project.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/vectorization_part_project.q.out b/ql/src/test/results/clientpositive/spark/vectorization_part_project.q.out
index 361384f..7e1cde0 100644
--- a/ql/src/test/results/clientpositive/spark/vectorization_part_project.q.out
+++ b/ql/src/test/results/clientpositive/spark/vectorization_part_project.q.out
@@ -85,7 +85,7 @@ STAGE PLANS:
                 enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
                 groupByVectorOutput: true
                 inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
+                allNative: true
                 usesVectorUDFAdaptor: false
                 vectorized: true
         Reducer 2 

http://git-wip-us.apache.org/repos/asf/hive/blob/ec8c390e/ql/src/test/results/clientpositive/spark/vectorization_short_regress.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/vectorization_short_regress.q.out b/ql/src/test/results/clientpositive/spark/vectorization_short_regress.q.out
index 6d7c103..cc0f029 100644
--- a/ql/src/test/results/clientpositive/spark/vectorization_short_regress.q.out
+++ b/ql/src/test/results/clientpositive/spark/vectorization_short_regress.q.out
@@ -1009,10 +1009,9 @@ STAGE PLANS:
                         key expressions: _col0 (type: int), _col1 (type: double), _col2 (type: timestamp), _col3 (type: string), _col4 (type: boolean), _col5 (type: tinyint), _col6 (type: float), _col7 (type: timestamp), _col8 (type: smallint), _col9 (type: bigint), _col10 (type: bigint), _col11 (type: int), _col12 (type: decimal(14,3)), _col13 (type: smallint), _col14 (type: smallint), _col15 (type: smallint), _col16 (type: double), _col17 (type: decimal(15,3)), _col18 (type: float), _col19 (type: double), _col20 (type: double), _col21 (type: tinyint), _col22 (type: decimal(9,7))
                         sort order: +++++++++++++++++++++++
                         Reduce Sink Vectorization:
-                            className: VectorReduceSinkOperator
-                            native: false
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                            nativeConditionsNotMet: No TopN IS false
+                            className: VectorReduceSinkObjectHashOperator
+                            native: true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 9898 Data size: 303864 Basic stats: COMPLETE Column stats: NONE
                         TopN Hash Memory Usage: 0.1
             Execution mode: vectorized
@@ -1021,7 +1020,7 @@ STAGE PLANS:
                 enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
                 groupByVectorOutput: true
                 inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
+                allNative: true
                 usesVectorUDFAdaptor: false
                 vectorized: true
         Reducer 2 
@@ -1306,10 +1305,9 @@ STAGE PLANS:
                         key expressions: _col0 (type: int), _col1 (type: bigint), _col2 (type: string), _col3 (type: boolean), _col4 (type: float), _col5 (type: double), _col6 (type: timestamp), _col7 (type: smallint), _col8 (type: string), _col9 (type: boolean), _col10 (type: double), _col11 (type: decimal(5,3)), _col12 (type: double), _col13 (type: float), _col14 (type: float), _col15 (type: float), _col16 (type: float), _col17 (type: double), _col18 (type: double), _col19 (type: bigint), _col20 (type: double), _col21 (type: smallint), _col22 (type: bigint), _col23 (type: double), _col24 (type: smallint)
                         sort order: +++++++++++++++++++++++++
                         Reduce Sink Vectorization:
-                            className: VectorReduceSinkOperator
-                            native: false
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                            nativeConditionsNotMet: No TopN IS false
+                            className: VectorReduceSinkObjectHashOperator
+                            native: true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE
                         TopN Hash Memory Usage: 0.1
             Execution mode: vectorized
@@ -1318,7 +1316,7 @@ STAGE PLANS:
                 enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
                 groupByVectorOutput: true
                 inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
+                allNative: true
                 usesVectorUDFAdaptor: false
                 vectorized: true
         Reducer 2 
@@ -1552,10 +1550,9 @@ STAGE PLANS:
                         key expressions: _col8 (type: boolean), _col1 (type: string), _col3 (type: timestamp), _col5 (type: float), _col6 (type: bigint), _col1 (type: string), _col4 (type: double), _col0 (type: int), _col7 (type: smallint), _col4 (type: double), _col9 (type: int), _col10 (type: bigint), _col11 (type: bigint), _col12 (type: float), _col13 (type: bigint), _col14 (type: double), _col15 (type: double), _col16 (type: bigint), _col17 (type: double), _col18 (type: decimal(8,7)), _col19 (type: double), _col20 (type: smallint), _col21 (type: int)
                         sort order: +++++++++++++++++++++++
                         Reduce Sink Vectorization:
-                            className: VectorReduceSinkOperator
-                            native: false
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                            nativeConditionsNotMet: No TopN IS false
+                            className: VectorReduceSinkObjectHashOperator
+                            native: true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 10922 Data size: 335301 Basic stats: COMPLETE Column stats: NONE
                         TopN Hash Memory Usage: 0.1
                         value expressions: _col2 (type: boolean)
@@ -1565,7 +1562,7 @@ STAGE PLANS:
                 enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
                 groupByVectorOutput: true
                 inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
+                allNative: true
                 usesVectorUDFAdaptor: false
                 vectorized: true
         Reducer 2 
@@ -1856,10 +1853,9 @@ STAGE PLANS:
                         key expressions: _col5 (type: smallint), _col1 (type: string), _col2 (type: double), _col3 (type: float), _col4 (type: bigint), _col6 (type: double), _col7 (type: int), _col8 (type: float), _col9 (type: double), _col10 (type: double), _col11 (type: double), _col12 (type: float), _col13 (type: int), _col14 (type: double), _col15 (type: double)
                         sort order: +++++++++++++++
                         Reduce Sink Vectorization:
-                            className: VectorReduceSinkOperator
-                            native: false
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                            nativeConditionsNotMet: No TopN IS false
+                            className: VectorReduceSinkObjectHashOperator
+                            native: true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 3868 Data size: 118746 Basic stats: COMPLETE Column stats: NONE
                         TopN Hash Memory Usage: 0.1
                         value expressions: _col0 (type: timestamp)
@@ -1869,7 +1865,7 @@ STAGE PLANS:
                 enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
                 groupByVectorOutput: true
                 inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
+                allNative: true
                 usesVectorUDFAdaptor: false
                 vectorized: true
         Reducer 2 
@@ -3253,7 +3249,7 @@ STAGE PLANS:
                         Reduce Sink Vectorization:
                             className: VectorReduceSinkObjectHashOperator
                             native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
                         value expressions: _col0 (type: bigint)
             Execution mode: vectorized
@@ -3365,7 +3361,7 @@ STAGE PLANS:
                         Reduce Sink Vectorization:
                             className: VectorReduceSinkObjectHashOperator
                             native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col0 (type: bigint)
             Execution mode: vectorized
@@ -3549,7 +3545,7 @@ STAGE PLANS:
                         Reduce Sink Vectorization:
                             className: VectorReduceSinkObjectHashOperator
                             native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
                         value expressions: _col0 (type: bigint)
             Execution mode: vectorized
@@ -3661,7 +3657,7 @@ STAGE PLANS:
                         Reduce Sink Vectorization:
                             className: VectorReduceSinkObjectHashOperator
                             native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col0 (type: bigint)
             Execution mode: vectorized
@@ -3773,7 +3769,7 @@ STAGE PLANS:
                         Reduce Sink Vectorization:
                             className: VectorReduceSinkObjectHashOperator
                             native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col0 (type: bigint)
             Execution mode: vectorized
@@ -3885,7 +3881,7 @@ STAGE PLANS:
                         Reduce Sink Vectorization:
                             className: VectorReduceSinkObjectHashOperator
                             native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col0 (type: bigint)
             Execution mode: vectorized
@@ -3997,7 +3993,7 @@ STAGE PLANS:
                         Reduce Sink Vectorization:
                             className: VectorReduceSinkObjectHashOperator
                             native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col0 (type: bigint)
             Execution mode: vectorized
@@ -4109,7 +4105,7 @@ STAGE PLANS:
                         Reduce Sink Vectorization:
                             className: VectorReduceSinkObjectHashOperator
                             native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col0 (type: bigint)
             Execution mode: vectorized

http://git-wip-us.apache.org/repos/asf/hive/blob/ec8c390e/ql/src/test/results/clientpositive/spark/vectorized_case.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/vectorized_case.q.out b/ql/src/test/results/clientpositive/spark/vectorized_case.q.out
index ead71ad..1096c90 100644
--- a/ql/src/test/results/clientpositive/spark/vectorized_case.q.out
+++ b/ql/src/test/results/clientpositive/spark/vectorized_case.q.out
@@ -298,7 +298,7 @@ STAGE PLANS:
                         Reduce Sink Vectorization:
                             className: VectorReduceSinkObjectHashOperator
                             native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col0 (type: bigint), _col1 (type: bigint)
             Execution mode: vectorized
@@ -423,7 +423,7 @@ STAGE PLANS:
                         Reduce Sink Vectorization:
                             className: VectorReduceSinkObjectHashOperator
                             native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col0 (type: bigint), _col1 (type: bigint)
             Execution mode: vectorized
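
All of the .q.out diffs above show the same effect: the reduce-sink native-execution condition "No TopN" is relaxed to "No PTF TopN" (HIVE-16275), so plans that previously fell back to the non-native VectorReduceSinkOperator for ORDER BY ... LIMIT (TopN) queries now use the native VectorReduceSinkObjectHashOperator, and the enclosing map work is reported as allNative: true. The sketch below is only a schematic illustration of how the nativeConditionsMet / nativeConditionsNotMet bookkeeping seen in these EXPLAIN outputs can be modeled; the class and method names are invented for the example and are not Hive's actual Vectorizer code.

import java.util.ArrayList;
import java.util.List;

// Hypothetical sketch: collects named boolean checks the way the EXPLAIN
// "nativeConditionsMet" / "nativeConditionsNotMet" lines report them.
public class NativeConditionSketch {
  public static void main(String[] args) {
    List<String> met = new ArrayList<>();
    List<String> notMet = new ArrayList<>();

    check(met, notMet, "hive.vectorized.execution.reducesink.new.enabled IS true", true);
    check(met, notMet, "hive.execution.engine spark IN [tez, spark] IS true", true);
    // After HIVE-16275 the check is "No PTF TopN" rather than "No TopN",
    // so an ORDER BY ... LIMIT (TopN) query no longer disqualifies the sink.
    check(met, notMet, "No PTF TopN IS true", true);
    check(met, notMet, "No DISTINCT columns IS true", true);

    boolean isNative = notMet.isEmpty();
    System.out.println("native: " + isNative);
    System.out.println("nativeConditionsMet: " + String.join(", ", met));
    if (!notMet.isEmpty()) {
      System.out.println("nativeConditionsNotMet: " + String.join(", ", notMet));
    }
  }

  private static void check(List<String> met, List<String> notMet, String name, boolean ok) {
    (ok ? met : notMet).add(name);
  }
}

Running this prints native: true plus a nativeConditionsMet line in the same shape as the plan output above.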


[03/50] [abbrv] hive git commit: Revert "HIVE-16047: Shouldn't try to get KeyProvider unless encryption is enabled (Rui reviewed by Xuefu and Ferdinand)"

Posted by we...@apache.org.
Revert "HIVE-16047: Shouldn't try to get KeyProvider unless encryption is enabled (Rui reviewed by Xuefu and Ferdinand)"


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/d03261c4
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/d03261c4
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/d03261c4

Branch: refs/heads/hive-14535
Commit: d03261c473312803a7a0cb0abd1f14d50444887d
Parents: 40b70eb
Author: Rui Li <li...@apache.org>
Authored: Wed May 3 11:54:26 2017 +0800
Committer: Rui Li <li...@apache.org>
Committed: Wed May 3 11:54:26 2017 +0800

----------------------------------------------------------------------
 .../org/apache/hadoop/hive/shims/Hadoop23Shims.java | 16 +---------------
 1 file changed, 1 insertion(+), 15 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/d03261c4/shims/0.23/src/main/java/org/apache/hadoop/hive/shims/Hadoop23Shims.java
----------------------------------------------------------------------
diff --git a/shims/0.23/src/main/java/org/apache/hadoop/hive/shims/Hadoop23Shims.java b/shims/0.23/src/main/java/org/apache/hadoop/hive/shims/Hadoop23Shims.java
index 21a18f8..0483e91 100644
--- a/shims/0.23/src/main/java/org/apache/hadoop/hive/shims/Hadoop23Shims.java
+++ b/shims/0.23/src/main/java/org/apache/hadoop/hive/shims/Hadoop23Shims.java
@@ -57,7 +57,6 @@ import org.apache.hadoop.fs.TrashPolicy;
 import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSClient;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.MiniDFSNNTopology;
@@ -1149,23 +1148,10 @@ public class Hadoop23Shims extends HadoopShimsSecure {
       DistributedFileSystem dfs = (DistributedFileSystem)FileSystem.get(uri, conf);
 
       this.conf = conf;
-      this.keyProvider = isEncryptionEnabled(dfs.getClient(), dfs.getConf()) ?
-          dfs.getClient().getKeyProvider() : null;
+      this.keyProvider = dfs.getClient().getKeyProvider();
       this.hdfsAdmin = new HdfsAdmin(uri, conf);
     }
 
-    private boolean isEncryptionEnabled(DFSClient client, Configuration conf) {
-      try {
-        DFSClient.class.getMethod("isHDFSEncryptionEnabled");
-      } catch (NoSuchMethodException e) {
-        // The method is available since Hadoop-2.7.1; if we run with an older Hadoop, check this
-        // ourselves. Note that this setting is in turn deprecated in newer versions of Hadoop, but
-        // we only care for it in the older versions; so we will hardcode the old name here.
-        return !conf.getTrimmed("dfs.encryption.key.provider.uri", "").isEmpty();
-      }
-      return client.isHDFSEncryptionEnabled();
-    }
-
     @Override
     public boolean isPathEncrypted(Path path) throws IOException {
       Path fullPath;
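
The revert above restores the unconditional dfs.getClient().getKeyProvider() call; the removed isEncryptionEnabled helper had used reflection to detect whether the running Hadoop client exposes DFSClient.isHDFSEncryptionEnabled (available since Hadoop 2.7.1) and, when it does not, fell back to the legacy dfs.encryption.key.provider.uri setting. A minimal, self-contained sketch of that reflection-based capability check follows; it has no Hadoop dependency, and the class name and sample KMS URI are illustrative only.

// Hypothetical sketch of reflection-based capability detection, independent
// of Hadoop: probe for a method by name and fall back to a config key when
// the method is absent. Class name and sample URI are illustrative only.
public class CapabilityCheckSketch {

  static boolean isFeatureEnabled(Object client, java.util.Map<String, String> conf) {
    try {
      // Probe: does this client class expose the newer API?
      client.getClass().getMethod("isHDFSEncryptionEnabled");
    } catch (NoSuchMethodException e) {
      // Older runtime: fall back to the legacy configuration key.
      return !conf.getOrDefault("dfs.encryption.key.provider.uri", "").trim().isEmpty();
    }
    try {
      return (Boolean) client.getClass().getMethod("isHDFSEncryptionEnabled").invoke(client);
    } catch (ReflectiveOperationException e) {
      return false;
    }
  }

  public static void main(String[] args) {
    java.util.Map<String, String> conf = new java.util.HashMap<>();
    conf.put("dfs.encryption.key.provider.uri", "kms://http@localhost:9600/kms");
    // A plain Object has no isHDFSEncryptionEnabled method, so the probe
    // misses and the check falls back to the configuration key -> true.
    System.out.println(isFeatureEnabled(new Object(), conf));
  }
}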


[20/50] [abbrv] hive git commit: HIVE-16275: Vectorization: Add ReduceSink support for TopN (in specialized native classes) (Matt McCline, reviewed by Gopal Vijayaraghavan)

Posted by we...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/ec8c390e/ql/src/test/results/clientpositive/vectorization_limit.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vectorization_limit.q.out b/ql/src/test/results/clientpositive/vectorization_limit.q.out
index fc774f0..7381294 100644
--- a/ql/src/test/results/clientpositive/vectorization_limit.q.out
+++ b/ql/src/test/results/clientpositive/vectorization_limit.q.out
@@ -60,17 +60,17 @@ POSTHOOK: query: SELECT cbigint, cdouble FROM alltypesorc WHERE cbigint < cdoubl
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@alltypesorc
 #### A masked pattern was here ####
--1887561756	1839.0
 -1887561756	-10011.0
 -1887561756	-13877.0
--1887561756	10361.0
--1887561756	-8881.0
 -1887561756	-2281.0
+-1887561756	-8881.0
+-1887561756	10361.0
+-1887561756	1839.0
 -1887561756	9531.0
-PREHOOK: query: explain vectorization expression
+PREHOOK: query: explain vectorization detail
 select ctinyint,cdouble,csmallint from alltypesorc where ctinyint is not null order by ctinyint,cdouble limit 20
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization expression
+POSTHOOK: query: explain vectorization detail
 select ctinyint,cdouble,csmallint from alltypesorc where ctinyint is not null order by ctinyint,cdouble limit 20
 POSTHOOK: type: QUERY
 PLAN VECTORIZATION:
@@ -112,8 +112,8 @@ STAGE PLANS:
                   Reduce Sink Vectorization:
                       className: VectorReduceSinkOperator
                       native: false
-                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                      nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false, No TopN IS false
+                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                      nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false
                   Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
                   TopN Hash Memory Usage: 0.3
                   value expressions: _col2 (type: smallint)
@@ -126,6 +126,11 @@ STAGE PLANS:
           allNative: false
           usesVectorUDFAdaptor: false
           vectorized: true
+          rowBatchContext:
+              dataColumnCount: 12
+              includeColumns: [0, 1, 5]
+              dataColumns: ctinyint:tinyint, csmallint:smallint, cint:int, cbigint:bigint, cfloat:float, cdouble:double, cstring1:string, cstring2:string, ctimestamp1:timestamp, ctimestamp2:timestamp, cboolean1:boolean, cboolean2:boolean
+              partitionColumnCount: 0
       Reduce Vectorization:
           enabled: false
           enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true
@@ -160,10 +165,17 @@ POSTHOOK: query: select ctinyint,cdouble,csmallint from alltypesorc where ctinyi
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@alltypesorc
 #### A masked pattern was here ####
--64	-15920.0	-15920
 -64	-10462.0	-10462
--64	-9842.0	-9842
--64	-8080.0	-8080
+-64	-15920.0	-15920
+-64	-1600.0	-1600
+-64	-200.0	-200
+-64	-2919.0	-2919
+-64	-3097.0	-3097
+-64	-3586.0	-3586
+-64	-4018.0	-4018
+-64	-4040.0	-4040
+-64	-4803.0	-4803
+-64	-6907.0	-6907
 -64	-7196.0	-7196
 -64	-7196.0	-7196
 -64	-7196.0	-7196
@@ -171,19 +183,12 @@ POSTHOOK: Input: default@alltypesorc
 -64	-7196.0	-7196
 -64	-7196.0	-7196
 -64	-7196.0	-7196
--64	-6907.0	-6907
--64	-4803.0	-4803
--64	-4040.0	-4040
--64	-4018.0	-4018
--64	-3586.0	-3586
--64	-3097.0	-3097
--64	-2919.0	-2919
--64	-1600.0	-1600
--64	-200.0	-200
-PREHOOK: query: explain vectorization expression
+-64	-8080.0	-8080
+-64	-9842.0	-9842
+PREHOOK: query: explain vectorization detail
 select ctinyint,avg(cdouble + 1) from alltypesorc group by ctinyint order by ctinyint limit 20
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization expression
+POSTHOOK: query: explain vectorization detail
 select ctinyint,avg(cdouble + 1) from alltypesorc group by ctinyint order by ctinyint limit 20
 POSTHOOK: type: QUERY
 PLAN VECTORIZATION:
@@ -243,6 +248,12 @@ STAGE PLANS:
           allNative: false
           usesVectorUDFAdaptor: false
           vectorized: true
+          rowBatchContext:
+              dataColumnCount: 12
+              includeColumns: [0, 5]
+              dataColumns: ctinyint:tinyint, csmallint:smallint, cint:int, cbigint:bigint, cfloat:float, cdouble:double, cstring1:string, cstring2:string, ctimestamp1:timestamp, ctimestamp2:timestamp, cboolean1:boolean, cboolean2:boolean
+              partitionColumnCount: 0
+              scratchColumnTypeNames: double
       Reduce Vectorization:
           enabled: false
           enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true
@@ -283,30 +294,30 @@ POSTHOOK: query: select ctinyint,avg(cdouble + 1) from alltypesorc group by ctin
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@alltypesorc
 #### A masked pattern was here ####
-NULL	9370.0945309795
--64	373.52941176470586
--63	2178.7272727272725
--62	245.69387755102042
--61	914.3404255319149
--60	1071.82
--59	318.27272727272725
--58	3483.2444444444445
--57	1867.0535714285713
--56	2595.818181818182
--55	2385.595744680851
--54	2712.7272727272725
--53	-532.7567567567568
--52	2810.705882352941
--51	-96.46341463414635
--50	-960.0192307692307
--49	768.7659574468086
--48	1672.909090909091
--47	-574.6428571428571
 -46	3033.55
-PREHOOK: query: explain vectorization expression
+-47	-574.6428571428571
+-48	1672.909090909091
+-49	768.7659574468086
+-50	-960.0192307692307
+-51	-96.46341463414635
+-52	2810.705882352941
+-53	-532.7567567567568
+-54	2712.7272727272725
+-55	2385.595744680851
+-56	2595.818181818182
+-57	1867.0535714285713
+-58	3483.2444444444445
+-59	318.27272727272725
+-60	1071.82
+-61	914.3404255319149
+-62	245.69387755102042
+-63	2178.7272727272725
+-64	373.52941176470586
+NULL	9370.0945309795
+PREHOOK: query: explain vectorization detail
 select distinct(ctinyint) from alltypesorc limit 20
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization expression
+POSTHOOK: query: explain vectorization detail
 select distinct(ctinyint) from alltypesorc limit 20
 POSTHOOK: type: QUERY
 PLAN VECTORIZATION:
@@ -353,8 +364,8 @@ STAGE PLANS:
                   Reduce Sink Vectorization:
                       className: VectorReduceSinkOperator
                       native: false
-                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                      nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false, No TopN IS false
+                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                      nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false
                   Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
                   TopN Hash Memory Usage: 0.3
       Execution mode: vectorized
@@ -366,6 +377,11 @@ STAGE PLANS:
           allNative: false
           usesVectorUDFAdaptor: false
           vectorized: true
+          rowBatchContext:
+              dataColumnCount: 12
+              includeColumns: [0]
+              dataColumns: ctinyint:tinyint, csmallint:smallint, cint:int, cbigint:bigint, cfloat:float, cdouble:double, cstring1:string, cstring2:string, ctimestamp1:timestamp, ctimestamp2:timestamp, cboolean1:boolean, cboolean2:boolean
+              partitionColumnCount: 0
       Reduce Vectorization:
           enabled: false
           enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true
@@ -405,30 +421,30 @@ POSTHOOK: query: select distinct(ctinyint) from alltypesorc limit 20
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@alltypesorc
 #### A masked pattern was here ####
-NULL
--64
--63
--62
--61
--60
--59
--58
--57
--56
--55
--54
--53
--52
--51
--50
--49
--48
--47
 -46
-PREHOOK: query: explain vectorization expression
+-47
+-48
+-49
+-50
+-51
+-52
+-53
+-54
+-55
+-56
+-57
+-58
+-59
+-60
+-61
+-62
+-63
+-64
+NULL
+PREHOOK: query: explain vectorization detail
 select ctinyint, count(distinct(cdouble)) from alltypesorc group by ctinyint order by ctinyint limit 20
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization expression
+POSTHOOK: query: explain vectorization detail
 select ctinyint, count(distinct(cdouble)) from alltypesorc group by ctinyint order by ctinyint limit 20
 POSTHOOK: type: QUERY
 PLAN VECTORIZATION:
@@ -477,8 +493,8 @@ STAGE PLANS:
                   Reduce Sink Vectorization:
                       className: VectorReduceSinkOperator
                       native: false
-                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                      nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false, No TopN IS false, No DISTINCT columns IS false
+                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No PTF TopN IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                      nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false, No DISTINCT columns IS false
                   Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
                   TopN Hash Memory Usage: 0.3
       Execution mode: vectorized
@@ -490,6 +506,11 @@ STAGE PLANS:
           allNative: false
           usesVectorUDFAdaptor: false
           vectorized: true
+          rowBatchContext:
+              dataColumnCount: 12
+              includeColumns: [0, 5]
+              dataColumns: ctinyint:tinyint, csmallint:smallint, cint:int, cbigint:bigint, cfloat:float, cdouble:double, cstring1:string, cstring2:string, ctimestamp1:timestamp, ctimestamp2:timestamp, cboolean1:boolean, cboolean2:boolean
+              partitionColumnCount: 0
       Reduce Vectorization:
           enabled: false
           enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true
@@ -530,30 +551,30 @@ POSTHOOK: query: select ctinyint, count(distinct(cdouble)) from alltypesorc grou
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@alltypesorc
 #### A masked pattern was here ####
-NULL	2932
--64	24
--63	19
--62	27
--61	25
--60	27
--59	31
--58	23
--57	35
--56	36
--55	29
--54	26
--53	22
--52	33
--51	21
--50	30
--49	26
--48	29
--47	22
 -46	24
-PREHOOK: query: explain vectorization expression
+-47	22
+-48	29
+-49	26
+-50	30
+-51	21
+-52	33
+-53	22
+-54	26
+-55	29
+-56	36
+-57	35
+-58	23
+-59	31
+-60	27
+-61	25
+-62	27
+-63	19
+-64	24
+NULL	2932
+PREHOOK: query: explain vectorization detail
 select ctinyint,cdouble from alltypesorc order by ctinyint limit 0
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization expression
+POSTHOOK: query: explain vectorization detail
 select ctinyint,cdouble from alltypesorc order by ctinyint limit 0
 POSTHOOK: type: QUERY
 PLAN VECTORIZATION:
@@ -578,10 +599,10 @@ POSTHOOK: query: select ctinyint,cdouble from alltypesorc order by ctinyint limi
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@alltypesorc
 #### A masked pattern was here ####
-PREHOOK: query: explain vectorization expression
+PREHOOK: query: explain vectorization detail
 select cdouble, sum(ctinyint) as sum from alltypesorc where ctinyint is not null group by cdouble order by sum, cdouble limit 20
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization expression
+POSTHOOK: query: explain vectorization detail
 select cdouble, sum(ctinyint) as sum from alltypesorc where ctinyint is not null group by cdouble order by sum, cdouble limit 20
 POSTHOOK: type: QUERY
 PLAN VECTORIZATION:
@@ -630,7 +651,7 @@ STAGE PLANS:
                   Reduce Sink Vectorization:
                       className: VectorReduceSinkOperator
                       native: false
-                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                       nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false
                   Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
                   value expressions: _col1 (type: bigint)
@@ -643,6 +664,11 @@ STAGE PLANS:
           allNative: false
           usesVectorUDFAdaptor: false
           vectorized: true
+          rowBatchContext:
+              dataColumnCount: 12
+              includeColumns: [0, 5]
+              dataColumns: ctinyint:tinyint, csmallint:smallint, cint:int, cbigint:bigint, cfloat:float, cdouble:double, cstring1:string, cstring2:string, ctimestamp1:timestamp, ctimestamp2:timestamp, cboolean1:boolean, cboolean2:boolean
+              partitionColumnCount: 0
       Reduce Vectorization:
           enabled: false
           enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true
@@ -678,8 +704,8 @@ STAGE PLANS:
               Reduce Sink Vectorization:
                   className: VectorReduceSinkOperator
                   native: false
-                  nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                  nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false, No TopN IS false
+                  nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                  nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false
               Statistics: Num rows: 6144 Data size: 1320982 Basic stats: COMPLETE Column stats: NONE
               TopN Hash Memory Usage: 0.3
       Execution mode: vectorized
@@ -691,6 +717,11 @@ STAGE PLANS:
           allNative: false
           usesVectorUDFAdaptor: false
           vectorized: true
+          rowBatchContext:
+              dataColumnCount: 2
+              includeColumns: [0, 1]
+              dataColumns: _col0:double, _col1:bigint
+              partitionColumnCount: 0
       Reduce Vectorization:
           enabled: false
           enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true
@@ -725,23 +756,23 @@ POSTHOOK: query: select cdouble, sum(ctinyint) as sum from alltypesorc where cti
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@alltypesorc
 #### A masked pattern was here ####
-NULL	-32768
+-10462.0	-64
+-1121.0	-89
+-11322.0	-101
+-11492.0	-78
+-15920.0	-64
+-4803.0	-64
+-6907.0	-64
 -7196.0	-2009
+-8080.0	-64
+-8118.0	-80
+-9842.0	-64
+10496.0	-67
 15601.0	-1733
-4811.0	-115
--11322.0	-101
--1121.0	-89
-7705.0	-88
 3520.0	-86
--8118.0	-80
+4811.0	-115
 5241.0	-80
--11492.0	-78
-9452.0	-76
 557.0	-75
-10496.0	-67
--15920.0	-64
--10462.0	-64
--9842.0	-64
--8080.0	-64
--6907.0	-64
--4803.0	-64
+7705.0	-88
+9452.0	-76
+NULL	-32768

http://git-wip-us.apache.org/repos/asf/hive/blob/ec8c390e/ql/src/test/results/clientpositive/vectorization_offset_limit.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vectorization_offset_limit.q.out b/ql/src/test/results/clientpositive/vectorization_offset_limit.q.out
index 5da2ad1..b7442d4 100644
--- a/ql/src/test/results/clientpositive/vectorization_offset_limit.q.out
+++ b/ql/src/test/results/clientpositive/vectorization_offset_limit.q.out
@@ -108,8 +108,8 @@ STAGE PLANS:
                   Reduce Sink Vectorization:
                       className: VectorReduceSinkOperator
                       native: false
-                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                      nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false, No TopN IS false
+                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                      nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false
                   Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
                   TopN Hash Memory Usage: 0.1
                   value expressions: _col2 (type: smallint)

http://git-wip-us.apache.org/repos/asf/hive/blob/ec8c390e/ql/src/test/results/clientpositive/vectorized_case.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vectorized_case.q.out b/ql/src/test/results/clientpositive/vectorized_case.q.out
index 2be619a..5a7a8a2 100644
--- a/ql/src/test/results/clientpositive/vectorized_case.q.out
+++ b/ql/src/test/results/clientpositive/vectorized_case.q.out
@@ -287,7 +287,7 @@ STAGE PLANS:
                   Reduce Sink Vectorization:
                       className: VectorReduceSinkOperator
                       native: false
-                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                       nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false
                   Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
                   value expressions: _col0 (type: bigint), _col1 (type: bigint)
@@ -398,7 +398,7 @@ STAGE PLANS:
                   Reduce Sink Vectorization:
                       className: VectorReduceSinkOperator
                       native: false
-                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                       nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false
                   Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
                   value expressions: _col0 (type: bigint), _col1 (type: bigint)

http://git-wip-us.apache.org/repos/asf/hive/blob/ec8c390e/ql/src/test/results/clientpositive/vectorized_date_funcs.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vectorized_date_funcs.q.out b/ql/src/test/results/clientpositive/vectorized_date_funcs.q.out
index 35574f4..4248d08 100644
--- a/ql/src/test/results/clientpositive/vectorized_date_funcs.q.out
+++ b/ql/src/test/results/clientpositive/vectorized_date_funcs.q.out
@@ -1250,7 +1250,7 @@ STAGE PLANS:
                   Reduce Sink Vectorization:
                       className: VectorReduceSinkOperator
                       native: false
-                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                       nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false
                   Statistics: Num rows: 1 Data size: 128 Basic stats: COMPLETE Column stats: NONE
                   value expressions: _col0 (type: date), _col1 (type: date), _col2 (type: bigint), _col3 (type: bigint)
@@ -1297,7 +1297,7 @@ STAGE PLANS:
               Reduce Sink Vectorization:
                   className: VectorReduceSinkOperator
                   native: false
-                  nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                  nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                   nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false
               Statistics: Num rows: 1 Data size: 128 Basic stats: COMPLETE Column stats: NONE
               value expressions: _col1 (type: date), _col2 (type: bigint), _col3 (type: bigint)

http://git-wip-us.apache.org/repos/asf/hive/blob/ec8c390e/ql/src/test/results/clientpositive/vectorized_mapjoin2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vectorized_mapjoin2.q.out b/ql/src/test/results/clientpositive/vectorized_mapjoin2.q.out
index 5e0f699..52aa05b 100644
--- a/ql/src/test/results/clientpositive/vectorized_mapjoin2.q.out
+++ b/ql/src/test/results/clientpositive/vectorized_mapjoin2.q.out
@@ -119,7 +119,7 @@ STAGE PLANS:
                       Reduce Sink Vectorization:
                           className: VectorReduceSinkOperator
                           native: false
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                           nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false
                       Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                       value expressions: _col0 (type: bigint)

http://git-wip-us.apache.org/repos/asf/hive/blob/ec8c390e/ql/src/test/results/clientpositive/vectorized_parquet_types.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vectorized_parquet_types.q.out b/ql/src/test/results/clientpositive/vectorized_parquet_types.q.out
index 23b910b..46e51f7 100644
--- a/ql/src/test/results/clientpositive/vectorized_parquet_types.q.out
+++ b/ql/src/test/results/clientpositive/vectorized_parquet_types.q.out
@@ -412,7 +412,7 @@ STAGE PLANS:
               Reduce Sink Vectorization:
                   className: VectorReduceSinkOperator
                   native: false
-                  nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                  nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                   nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false
               Statistics: Num rows: 11 Data size: 121 Basic stats: COMPLETE Column stats: NONE
               value expressions: _col1 (type: int), _col2 (type: smallint), _col3 (type: bigint), _col4 (type: double), _col5 (type: double), _col6 (type: decimal(4,2))

http://git-wip-us.apache.org/repos/asf/hive/blob/ec8c390e/ql/src/test/results/clientpositive/vectorized_shufflejoin.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vectorized_shufflejoin.q.out b/ql/src/test/results/clientpositive/vectorized_shufflejoin.q.out
index 4c3093d..d42369f 100644
--- a/ql/src/test/results/clientpositive/vectorized_shufflejoin.q.out
+++ b/ql/src/test/results/clientpositive/vectorized_shufflejoin.q.out
@@ -134,7 +134,7 @@ STAGE PLANS:
               Reduce Sink Vectorization:
                   className: VectorReduceSinkOperator
                   native: false
-                  nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                  nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                   nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false
               Statistics: Num rows: 1 Data size: 92 Basic stats: COMPLETE Column stats: NONE
               value expressions: _col1 (type: int), _col2 (type: int), _col3 (type: double)

http://git-wip-us.apache.org/repos/asf/hive/blob/ec8c390e/ql/src/test/results/clientpositive/vectorized_timestamp.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vectorized_timestamp.q.out b/ql/src/test/results/clientpositive/vectorized_timestamp.q.out
index df7ee50..df8297c 100644
--- a/ql/src/test/results/clientpositive/vectorized_timestamp.q.out
+++ b/ql/src/test/results/clientpositive/vectorized_timestamp.q.out
@@ -308,7 +308,7 @@ STAGE PLANS:
                   Reduce Sink Vectorization:
                       className: VectorReduceSinkOperator
                       native: false
-                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                       nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false
                   Statistics: Num rows: 1 Data size: 80 Basic stats: COMPLETE Column stats: NONE
                   value expressions: _col0 (type: timestamp), _col1 (type: timestamp)

http://git-wip-us.apache.org/repos/asf/hive/blob/ec8c390e/ql/src/test/results/clientpositive/vectorized_timestamp_funcs.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vectorized_timestamp_funcs.q.out b/ql/src/test/results/clientpositive/vectorized_timestamp_funcs.q.out
index aaee6e7..a4536fd 100644
--- a/ql/src/test/results/clientpositive/vectorized_timestamp_funcs.q.out
+++ b/ql/src/test/results/clientpositive/vectorized_timestamp_funcs.q.out
@@ -122,7 +122,7 @@ STAGE PLANS:
                 Reduce Sink Vectorization:
                     className: VectorReduceSinkOperator
                     native: false
-                    nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                    nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                     nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false
                 Statistics: Num rows: 40 Data size: 84 Basic stats: COMPLETE Column stats: NONE
                 value expressions: _col1 (type: int), _col2 (type: int), _col3 (type: int), _col4 (type: int), _col5 (type: int), _col6 (type: int), _col7 (type: int), _col8 (type: int)
@@ -287,7 +287,7 @@ STAGE PLANS:
                 Reduce Sink Vectorization:
                     className: VectorReduceSinkOperator
                     native: false
-                    nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                    nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                     nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false
                 Statistics: Num rows: 40 Data size: 84 Basic stats: COMPLETE Column stats: NONE
                 value expressions: _col1 (type: int), _col2 (type: int), _col3 (type: int), _col4 (type: int), _col5 (type: int), _col6 (type: int), _col7 (type: int), _col8 (type: int)
@@ -452,7 +452,7 @@ STAGE PLANS:
                 Reduce Sink Vectorization:
                     className: VectorReduceSinkOperator
                     native: false
-                    nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                    nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                     nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false
                 Statistics: Num rows: 40 Data size: 84 Basic stats: COMPLETE Column stats: NONE
                 value expressions: _col1 (type: boolean), _col2 (type: boolean), _col3 (type: boolean), _col4 (type: boolean), _col5 (type: boolean), _col6 (type: boolean), _col7 (type: boolean), _col8 (type: boolean)
@@ -617,7 +617,7 @@ STAGE PLANS:
                 Reduce Sink Vectorization:
                     className: VectorReduceSinkOperator
                     native: false
-                    nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                    nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                     nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false
                 Statistics: Num rows: 3 Data size: 294 Basic stats: COMPLETE Column stats: NONE
                 value expressions: _col1 (type: int), _col2 (type: int), _col3 (type: int), _col4 (type: int), _col5 (type: int), _col6 (type: int), _col7 (type: int), _col8 (type: int)
@@ -742,7 +742,7 @@ STAGE PLANS:
                   Reduce Sink Vectorization:
                       className: VectorReduceSinkOperator
                       native: false
-                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                       nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false
                   Statistics: Num rows: 1 Data size: 96 Basic stats: COMPLETE Column stats: NONE
                   value expressions: _col0 (type: timestamp), _col1 (type: timestamp), _col2 (type: bigint), _col3 (type: bigint)


[30/50] [abbrv] hive git commit: HIVE-16275: Vectorization: Add ReduceSink support for TopN (in specialized native classes) (Matt McCline, reviewed by Gopal Vijayaraghavan)

Posted by we...@apache.org.
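The golden-file updates that follow reflect the behavior change from HIVE-16275: a reduce sink feeding an ORDER BY ... LIMIT (TopN) no longer falls back to the row-mode VectorReduceSinkOperator. Under Tez/LLAP the explain output now reports a native class such as VectorReduceSinkObjectHashOperator with "No PTF TopN IS true" among the conditions met, while on MR the "hive.execution.engine mr IN [tez, spark] IS false" condition still blocks the native path, as the non-LLAP clientpositive diffs show. A minimal sketch of a query that exercises this path, assuming the standard alltypesorc test table and the reduce-sink vectorization setting referenced in the plans above:

    set hive.vectorized.execution.reducesink.new.enabled=true;
    set hive.execution.engine=tez;

    -- TopN reduce sink: previously reported "No TopN IS false" and native: false,
    -- now expected to report a native reduce sink class with "No PTF TopN IS true"
    explain vectorization detail
    select ctinyint, cdouble
    from alltypesorc
    where ctinyint is not null
    order by ctinyint
    limit 20;

The class name, the exact condition strings, and the TopN Hash Memory Usage line in the resulting plan are the pieces being rewritten throughout the q.out diffs below.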
http://git-wip-us.apache.org/repos/asf/hive/blob/ec8c390e/ql/src/test/results/clientpositive/llap/vector_count.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_count.q.out b/ql/src/test/results/clientpositive/llap/vector_count.q.out
index a6e2f59..5fa5a82 100644
--- a/ql/src/test/results/clientpositive/llap/vector_count.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_count.q.out
@@ -99,7 +99,7 @@ STAGE PLANS:
                         Reduce Sink Vectorization:
                             className: VectorReduceSinkOperator
                             native: false
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                             nativeConditionsNotMet: No DISTINCT columns IS false
                         Statistics: Num rows: 7 Data size: 100 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col5 (type: bigint)
@@ -208,7 +208,7 @@ STAGE PLANS:
                         Reduce Sink Vectorization:
                             className: VectorReduceSinkOperator
                             native: false
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                             nativeConditionsNotMet: No DISTINCT columns IS false
                         Statistics: Num rows: 7 Data size: 100 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col4 (type: bigint), _col5 (type: bigint), _col6 (type: bigint), _col7 (type: bigint), _col8 (type: bigint), _col9 (type: bigint)
@@ -301,7 +301,7 @@ STAGE PLANS:
                       Reduce Sink Vectorization:
                           className: VectorReduceSinkOperator
                           native: false
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                           nativeConditionsNotMet: No DISTINCT columns IS false
                       Statistics: Num rows: 7 Data size: 100 Basic stats: COMPLETE Column stats: NONE
                       value expressions: d (type: int)
@@ -397,7 +397,7 @@ STAGE PLANS:
                       Reduce Sink Vectorization:
                           className: VectorReduceSinkOperator
                           native: false
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                           nativeConditionsNotMet: No DISTINCT columns IS false
                       Statistics: Num rows: 7 Data size: 100 Basic stats: COMPLETE Column stats: NONE
             Execution mode: vectorized, llap

http://git-wip-us.apache.org/repos/asf/hive/blob/ec8c390e/ql/src/test/results/clientpositive/llap/vector_count_distinct.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_count_distinct.q.out b/ql/src/test/results/clientpositive/llap/vector_count_distinct.q.out
index 3c03787..18e16cf 100644
--- a/ql/src/test/results/clientpositive/llap/vector_count_distinct.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_count_distinct.q.out
@@ -1282,7 +1282,7 @@ STAGE PLANS:
                         Reduce Sink Vectorization:
                             className: VectorReduceSinkLongOperator
                             native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 2000 Data size: 3520000 Basic stats: COMPLETE Column stats: NONE
             Execution mode: vectorized, llap
             LLAP IO: all inputs
@@ -1331,7 +1331,7 @@ STAGE PLANS:
                     Reduce Sink Vectorization:
                         className: VectorReduceSinkObjectHashOperator
                         native: true
-                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                     Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                     value expressions: _col0 (type: bigint)
         Reducer 3 

http://git-wip-us.apache.org/repos/asf/hive/blob/ec8c390e/ql/src/test/results/clientpositive/llap/vector_data_types.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_data_types.q.out b/ql/src/test/results/clientpositive/llap/vector_data_types.q.out
index f67bc25..b9bb0a2 100644
--- a/ql/src/test/results/clientpositive/llap/vector_data_types.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_data_types.q.out
@@ -235,10 +235,9 @@ STAGE PLANS:
                       key expressions: _col0 (type: tinyint), _col1 (type: smallint), _col2 (type: int)
                       sort order: +++
                       Reduce Sink Vectorization:
-                          className: VectorReduceSinkOperator
-                          native: false
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                          nativeConditionsNotMet: No TopN IS false
+                          className: VectorReduceSinkObjectHashOperator
+                          native: true
+                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                       Statistics: Num rows: 1049 Data size: 311170 Basic stats: COMPLETE Column stats: NONE
                       TopN Hash Memory Usage: 0.1
                       value expressions: _col3 (type: bigint), _col4 (type: float), _col5 (type: double), _col6 (type: boolean), _col7 (type: string), _col8 (type: timestamp), _col9 (type: decimal(4,2)), _col10 (type: binary)
@@ -249,7 +248,7 @@ STAGE PLANS:
                 enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
                 groupByVectorOutput: true
                 inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
+                allNative: true
                 usesVectorUDFAdaptor: false
                 vectorized: true
         Reducer 2 

http://git-wip-us.apache.org/repos/asf/hive/blob/ec8c390e/ql/src/test/results/clientpositive/llap/vector_decimal_aggregate.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_decimal_aggregate.q.out b/ql/src/test/results/clientpositive/llap/vector_decimal_aggregate.q.out
index 44119b2..ab38382 100644
--- a/ql/src/test/results/clientpositive/llap/vector_decimal_aggregate.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_decimal_aggregate.q.out
@@ -86,7 +86,7 @@ STAGE PLANS:
                         Reduce Sink Vectorization:
                             className: VectorReduceSinkLongOperator
                             native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 12288 Data size: 2165060 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col1 (type: bigint), _col2 (type: decimal(20,10)), _col3 (type: decimal(20,10)), _col4 (type: decimal(30,10)), _col5 (type: bigint), _col6 (type: decimal(23,14)), _col7 (type: decimal(23,14)), _col8 (type: decimal(33,14)), _col9 (type: bigint)
             Execution mode: vectorized, llap

http://git-wip-us.apache.org/repos/asf/hive/blob/ec8c390e/ql/src/test/results/clientpositive/llap/vector_decimal_expressions.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_decimal_expressions.q.out b/ql/src/test/results/clientpositive/llap/vector_decimal_expressions.q.out
index 02888e7..2847807 100644
--- a/ql/src/test/results/clientpositive/llap/vector_decimal_expressions.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_decimal_expressions.q.out
@@ -63,10 +63,9 @@ STAGE PLANS:
                         key expressions: _col0 (type: decimal(25,14)), _col1 (type: decimal(26,14)), _col2 (type: decimal(38,13)), _col3 (type: decimal(38,17)), _col4 (type: decimal(12,10)), _col5 (type: int), _col6 (type: smallint), _col7 (type: tinyint), _col8 (type: bigint), _col9 (type: boolean), _col10 (type: double), _col11 (type: float), _col12 (type: string), _col13 (type: timestamp)
                         sort order: ++++++++++++++
                         Reduce Sink Vectorization:
-                            className: VectorReduceSinkOperator
-                            native: false
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                            nativeConditionsNotMet: No TopN IS false
+                            className: VectorReduceSinkObjectHashOperator
+                            native: true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 455 Data size: 78809 Basic stats: COMPLETE Column stats: NONE
                         TopN Hash Memory Usage: 0.1
             Execution mode: vectorized, llap
@@ -76,7 +75,7 @@ STAGE PLANS:
                 enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
                 groupByVectorOutput: true
                 inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
+                allNative: true
                 usesVectorUDFAdaptor: false
                 vectorized: true
         Reducer 2 

http://git-wip-us.apache.org/repos/asf/hive/blob/ec8c390e/ql/src/test/results/clientpositive/llap/vector_decimal_mapjoin.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_decimal_mapjoin.q.out b/ql/src/test/results/clientpositive/llap/vector_decimal_mapjoin.q.out
index 08aca71..c62e25a 100644
--- a/ql/src/test/results/clientpositive/llap/vector_decimal_mapjoin.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_decimal_mapjoin.q.out
@@ -183,7 +183,7 @@ STAGE PLANS:
                         Reduce Sink Vectorization:
                             className: VectorReduceSinkMultiKeyOperator
                             native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 1049 Data size: 117488 Basic stats: COMPLETE Column stats: NONE
             Execution mode: vectorized, llap
             LLAP IO: all inputs

http://git-wip-us.apache.org/repos/asf/hive/blob/ec8c390e/ql/src/test/results/clientpositive/llap/vector_decimal_round.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_decimal_round.q.out b/ql/src/test/results/clientpositive/llap/vector_decimal_round.q.out
index 8bd80cf..00bb50a 100644
--- a/ql/src/test/results/clientpositive/llap/vector_decimal_round.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_decimal_round.q.out
@@ -73,7 +73,7 @@ STAGE PLANS:
                       Reduce Sink Vectorization:
                           className: VectorReduceSinkObjectHashOperator
                           native: true
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                       Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE
                       value expressions: _col1 (type: decimal(11,0))
             Execution mode: vectorized, llap
@@ -175,7 +175,7 @@ STAGE PLANS:
                           className: VectorReduceSinkObjectHashOperator
                           keyExpressions: FuncRoundWithNumDigitsDecimalToDecimal(col 0, decimalPlaces -1) -> 1:decimal(11,0)
                           native: true
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                       Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE
                       value expressions: _col0 (type: decimal(10,0))
             Execution mode: vectorized, llap
@@ -501,7 +501,7 @@ STAGE PLANS:
                       Reduce Sink Vectorization:
                           className: VectorReduceSinkObjectHashOperator
                           native: true
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                       Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE
                       value expressions: _col1 (type: decimal(11,0))
             Execution mode: vectorized, llap
@@ -603,7 +603,7 @@ STAGE PLANS:
                           className: VectorReduceSinkObjectHashOperator
                           keyExpressions: FuncRoundWithNumDigitsDecimalToDecimal(col 0, decimalPlaces -1) -> 1:decimal(11,0)
                           native: true
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                       Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE
                       value expressions: _col0 (type: decimal(10,0))
             Execution mode: vectorized, llap

http://git-wip-us.apache.org/repos/asf/hive/blob/ec8c390e/ql/src/test/results/clientpositive/llap/vector_decimal_round_2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_decimal_round_2.q.out b/ql/src/test/results/clientpositive/llap/vector_decimal_round_2.q.out
index 6083a10..a3bf091 100644
--- a/ql/src/test/results/clientpositive/llap/vector_decimal_round_2.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_decimal_round_2.q.out
@@ -77,7 +77,7 @@ STAGE PLANS:
                       Reduce Sink Vectorization:
                           className: VectorReduceSinkObjectHashOperator
                           native: true
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                       Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE
                       value expressions: _col1 (type: decimal(21,0)), _col2 (type: decimal(22,1)), _col3 (type: decimal(23,2)), _col4 (type: decimal(24,3)), _col5 (type: decimal(21,0)), _col6 (type: decimal(21,0)), _col7 (type: decimal(21,0)), _col8 (type: decimal(21,0)), _col9 (type: decimal(21,0)), _col10 (type: decimal(21,0)), _col11 (type: decimal(21,0)), _col12 (type: decimal(21,0))
             Execution mode: vectorized, llap
@@ -228,7 +228,7 @@ STAGE PLANS:
                       Reduce Sink Vectorization:
                           className: VectorReduceSinkObjectHashOperator
                           native: true
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                       Statistics: Num rows: 1 Data size: 224 Basic stats: COMPLETE Column stats: NONE
                       value expressions: _col1 (type: decimal(21,0)), _col2 (type: decimal(22,1)), _col3 (type: decimal(23,2)), _col4 (type: decimal(24,3)), _col5 (type: decimal(25,4)), _col6 (type: decimal(21,0)), _col7 (type: decimal(21,0)), _col8 (type: decimal(21,0)), _col9 (type: decimal(21,0)), _col10 (type: decimal(21,0)), _col11 (type: decimal(21,0)), _col12 (type: decimal(22,1)), _col13 (type: decimal(23,2)), _col14 (type: decimal(24,3)), _col15 (type: decimal(25,4)), _col16 (type: decimal(21,0)), _col17 (type: decimal(21,0)), _col18 (type: decimal(21,0)), _col19 (type: decimal(21,0))
             Execution mode: vectorized, llap
@@ -406,7 +406,7 @@ STAGE PLANS:
                       Reduce Sink Vectorization:
                           className: VectorReduceSinkObjectHashOperator
                           native: true
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                       Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE
                       value expressions: _col1 (type: decimal(21,0)), _col2 (type: decimal(21,0)), _col3 (type: decimal(21,0)), _col4 (type: decimal(21,0)), _col5 (type: decimal(21,0)), _col6 (type: decimal(21,0)), _col7 (type: decimal(21,0)), _col8 (type: decimal(21,0)), _col9 (type: decimal(21,0)), _col10 (type: decimal(21,0)), _col11 (type: decimal(21,0)), _col12 (type: decimal(21,0)), _col13 (type: decimal(21,0)), _col14 (type: decimal(21,0)), _col15 (type: decimal(21,0)), _col16 (type: decimal(21,0)), _col17 (type: decimal(22,1)), _col18 (type: decimal(23,2)), _col19 (type: decimal(24,3)), _col20 (type: decimal(25,4)), _col21 (type: decimal(26,5)), _col22 (type: decimal(27,6)), _col23 (type: decimal(28,7)), _col24 (type: decimal(29,8)), _col25 (type: decimal(30,9)), _col26 (type: decimal(31,10)), _col27 (type: decimal(32,11)), _col28 (type: decimal(33,12)), _col29 (type: decimal(34,13)), _col31 (type: decimal(35,14)), _col32 (type: decimal(36,15)), _col33 (type: decimal(37,16))
             Execution mode: vectorized, llap
@@ -573,7 +573,7 @@ STAGE PLANS:
                       Reduce Sink Vectorization:
                           className: VectorReduceSinkObjectHashOperator
                           native: true
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                       Statistics: Num rows: 1 Data size: 224 Basic stats: COMPLETE Column stats: NONE
                       value expressions: _col1 (type: decimal(30,9))
             Execution mode: vectorized, llap

http://git-wip-us.apache.org/repos/asf/hive/blob/ec8c390e/ql/src/test/results/clientpositive/llap/vector_distinct_2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_distinct_2.q.out b/ql/src/test/results/clientpositive/llap/vector_distinct_2.q.out
index 84022f8..f17583f 100644
--- a/ql/src/test/results/clientpositive/llap/vector_distinct_2.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_distinct_2.q.out
@@ -157,7 +157,7 @@ STAGE PLANS:
                         Reduce Sink Vectorization:
                             className: VectorReduceSinkMultiKeyOperator
                             native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 2000 Data size: 918712 Basic stats: COMPLETE Column stats: NONE
             Execution mode: vectorized, llap
             LLAP IO: all inputs

http://git-wip-us.apache.org/repos/asf/hive/blob/ec8c390e/ql/src/test/results/clientpositive/llap/vector_empty_where.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_empty_where.q.out b/ql/src/test/results/clientpositive/llap/vector_empty_where.q.out
index f582ca3..81dfac6 100644
--- a/ql/src/test/results/clientpositive/llap/vector_empty_where.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_empty_where.q.out
@@ -62,7 +62,7 @@ STAGE PLANS:
                           Reduce Sink Vectorization:
                               className: VectorReduceSinkLongOperator
                               native: true
-                              nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                              nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                           Statistics: Num rows: 2888 Data size: 8628 Basic stats: COMPLETE Column stats: COMPLETE
             Execution mode: vectorized, llap
             LLAP IO: all inputs
@@ -111,7 +111,7 @@ STAGE PLANS:
                     Reduce Sink Vectorization:
                         className: VectorReduceSinkObjectHashOperator
                         native: true
-                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                     Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
                     value expressions: _col0 (type: bigint)
         Reducer 3 
@@ -217,7 +217,7 @@ STAGE PLANS:
                         Reduce Sink Vectorization:
                             className: VectorReduceSinkLongOperator
                             native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 2888 Data size: 8628 Basic stats: COMPLETE Column stats: COMPLETE
             Execution mode: vectorized, llap
             LLAP IO: all inputs
@@ -266,7 +266,7 @@ STAGE PLANS:
                     Reduce Sink Vectorization:
                         className: VectorReduceSinkObjectHashOperator
                         native: true
-                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                     Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
                     value expressions: _col0 (type: bigint)
         Reducer 3 
@@ -380,7 +380,7 @@ STAGE PLANS:
                           Reduce Sink Vectorization:
                               className: VectorReduceSinkLongOperator
                               native: true
-                              nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                              nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                           Statistics: Num rows: 2888 Data size: 8628 Basic stats: COMPLETE Column stats: COMPLETE
             Execution mode: vectorized, llap
             LLAP IO: all inputs
@@ -429,7 +429,7 @@ STAGE PLANS:
                     Reduce Sink Vectorization:
                         className: VectorReduceSinkObjectHashOperator
                         native: true
-                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                     Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
                     value expressions: _col0 (type: bigint)
         Reducer 3 
@@ -543,7 +543,7 @@ STAGE PLANS:
                           Reduce Sink Vectorization:
                               className: VectorReduceSinkLongOperator
                               native: true
-                              nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                              nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                           Statistics: Num rows: 2888 Data size: 8628 Basic stats: COMPLETE Column stats: COMPLETE
             Execution mode: vectorized, llap
             LLAP IO: all inputs
@@ -592,7 +592,7 @@ STAGE PLANS:
                     Reduce Sink Vectorization:
                         className: VectorReduceSinkObjectHashOperator
                         native: true
-                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                     Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
                     value expressions: _col0 (type: bigint)
         Reducer 3 

http://git-wip-us.apache.org/repos/asf/hive/blob/ec8c390e/ql/src/test/results/clientpositive/llap/vector_groupby4.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_groupby4.q.out b/ql/src/test/results/clientpositive/llap/vector_groupby4.q.out
index 3b3b801..ffeab2c 100644
--- a/ql/src/test/results/clientpositive/llap/vector_groupby4.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_groupby4.q.out
@@ -69,7 +69,7 @@ STAGE PLANS:
                       Reduce Sink Vectorization:
                           className: VectorReduceSinkObjectHashOperator
                           native: true
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                       Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: NONE
             Execution mode: vectorized, llap
             LLAP IO: all inputs
@@ -109,7 +109,7 @@ STAGE PLANS:
                   Reduce Sink Vectorization:
                       className: VectorReduceSinkStringOperator
                       native: true
-                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                   Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: NONE
         Reducer 3 
             Execution mode: vectorized, llap

http://git-wip-us.apache.org/repos/asf/hive/blob/ec8c390e/ql/src/test/results/clientpositive/llap/vector_groupby6.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_groupby6.q.out b/ql/src/test/results/clientpositive/llap/vector_groupby6.q.out
index 7e21493..5bfa9b5 100644
--- a/ql/src/test/results/clientpositive/llap/vector_groupby6.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_groupby6.q.out
@@ -69,7 +69,7 @@ STAGE PLANS:
                       Reduce Sink Vectorization:
                           className: VectorReduceSinkObjectHashOperator
                           native: true
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                       Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: NONE
             Execution mode: vectorized, llap
             LLAP IO: all inputs
@@ -109,7 +109,7 @@ STAGE PLANS:
                   Reduce Sink Vectorization:
                       className: VectorReduceSinkStringOperator
                       native: true
-                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                   Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: NONE
         Reducer 3 
             Execution mode: vectorized, llap

http://git-wip-us.apache.org/repos/asf/hive/blob/ec8c390e/ql/src/test/results/clientpositive/llap/vector_groupby_3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_groupby_3.q.out b/ql/src/test/results/clientpositive/llap/vector_groupby_3.q.out
index 5063f06..0242cbd 100644
--- a/ql/src/test/results/clientpositive/llap/vector_groupby_3.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_groupby_3.q.out
@@ -159,7 +159,7 @@ STAGE PLANS:
                         Reduce Sink Vectorization:
                             className: VectorReduceSinkMultiKeyOperator
                             native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 2000 Data size: 918712 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col2 (type: bigint)
             Execution mode: vectorized, llap

http://git-wip-us.apache.org/repos/asf/hive/blob/ec8c390e/ql/src/test/results/clientpositive/llap/vector_groupby_grouping_id3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_groupby_grouping_id3.q.out b/ql/src/test/results/clientpositive/llap/vector_groupby_grouping_id3.q.out
index cfc87fb..1c67983 100644
--- a/ql/src/test/results/clientpositive/llap/vector_groupby_grouping_id3.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_groupby_grouping_id3.q.out
@@ -102,7 +102,7 @@ STAGE PLANS:
                               className: VectorReduceSinkMultiKeyOperator
                               keyExpressions: ConstantVectorExpression(val 1) -> 4:long
                               native: true
-                              nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                              nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                           Statistics: Num rows: 6 Data size: 40 Basic stats: COMPLETE Column stats: NONE
                           value expressions: _col3 (type: bigint)
             Execution mode: vectorized, llap
@@ -266,7 +266,7 @@ STAGE PLANS:
                         Reduce Sink Vectorization:
                             className: VectorReduceSinkMultiKeyOperator
                             native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 12 Data size: 80 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col3 (type: bigint)
             Execution mode: vectorized, llap

http://git-wip-us.apache.org/repos/asf/hive/blob/ec8c390e/ql/src/test/results/clientpositive/llap/vector_groupby_mapjoin.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_groupby_mapjoin.q.out b/ql/src/test/results/clientpositive/llap/vector_groupby_mapjoin.q.out
index 1b1ec9e..7bfbd6f 100644
--- a/ql/src/test/results/clientpositive/llap/vector_groupby_mapjoin.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_groupby_mapjoin.q.out
@@ -97,7 +97,7 @@ STAGE PLANS:
                               Reduce Sink Vectorization:
                                   className: VectorReduceSinkObjectHashOperator
                                   native: true
-                                  nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                                  nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                               Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
                               value expressions: _col1 (type: string)
             Execution mode: vectorized, llap
@@ -142,7 +142,7 @@ STAGE PLANS:
                         Reduce Sink Vectorization:
                             className: VectorReduceSinkObjectHashOperator
                             native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE
                         value expressions: _col0 (type: bigint), _col1 (type: bigint)
             Execution mode: vectorized, llap
@@ -181,7 +181,7 @@ STAGE PLANS:
                       Reduce Sink Vectorization:
                           className: VectorReduceSinkStringOperator
                           native: true
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                       Statistics: Num rows: 205 Data size: 17835 Basic stats: COMPLETE Column stats: COMPLETE
             Execution mode: vectorized, llap
             LLAP IO: no inputs
@@ -247,7 +247,7 @@ STAGE PLANS:
                   Reduce Sink Vectorization:
                       className: VectorReduceSinkObjectHashOperator
                       native: true
-                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                   Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE
                   value expressions: _col0 (type: bigint), _col1 (type: bigint)
         Reducer 6 
@@ -287,7 +287,7 @@ STAGE PLANS:
                     Reduce Sink Vectorization:
                         className: VectorReduceSinkStringOperator
                         native: true
-                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                     Statistics: Num rows: 205 Data size: 18655 Basic stats: COMPLETE Column stats: COMPLETE
                     value expressions: _col1 (type: boolean)
 

http://git-wip-us.apache.org/repos/asf/hive/blob/ec8c390e/ql/src/test/results/clientpositive/llap/vector_groupby_reduce.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_groupby_reduce.q.out b/ql/src/test/results/clientpositive/llap/vector_groupby_reduce.q.out
index 0ba6195..39e81f3 100644
--- a/ql/src/test/results/clientpositive/llap/vector_groupby_reduce.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_groupby_reduce.q.out
@@ -278,10 +278,9 @@ STAGE PLANS:
                         sort order: +
                         Map-reduce partition columns: _col0 (type: int)
                         Reduce Sink Vectorization:
-                            className: VectorReduceSinkOperator
-                            native: false
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                            nativeConditionsNotMet: No TopN IS false
+                            className: VectorReduceSinkLongOperator
+                            native: true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 1000 Data size: 88276 Basic stats: COMPLETE Column stats: NONE
                         TopN Hash Memory Usage: 0.1
             Execution mode: vectorized, llap
@@ -319,10 +318,9 @@ STAGE PLANS:
                   key expressions: _col0 (type: int)
                   sort order: +
                   Reduce Sink Vectorization:
-                      className: VectorReduceSinkOperator
-                      native: false
-                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                      nativeConditionsNotMet: No TopN IS false
+                      className: VectorReduceSinkObjectHashOperator
+                      native: true
+                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                   Statistics: Num rows: 500 Data size: 44138 Basic stats: COMPLETE Column stats: NONE
                   TopN Hash Memory Usage: 0.1
         Reducer 3 
@@ -481,7 +479,7 @@ STAGE PLANS:
                         Reduce Sink Vectorization:
                             className: VectorReduceSinkLongOperator
                             native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 1000 Data size: 88276 Basic stats: COMPLETE Column stats: NONE
             Execution mode: vectorized, llap
             LLAP IO: all inputs
@@ -541,7 +539,7 @@ STAGE PLANS:
                       Reduce Sink Vectorization:
                           className: VectorReduceSinkObjectHashOperator
                           native: true
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                       Statistics: Num rows: 250 Data size: 22069 Basic stats: COMPLETE Column stats: NONE
         Reducer 3 
             Execution mode: vectorized, llap
@@ -763,7 +761,7 @@ STAGE PLANS:
                         Reduce Sink Vectorization:
                             className: VectorReduceSinkObjectHashOperator
                             native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 1000 Data size: 88276 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col2 (type: int)
             Execution mode: vectorized, llap
@@ -826,7 +824,7 @@ STAGE PLANS:
                       Reduce Sink Vectorization:
                           className: VectorReduceSinkObjectHashOperator
                           native: true
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                       Statistics: Num rows: 250 Data size: 22069 Basic stats: COMPLETE Column stats: NONE
                       value expressions: _col1 (type: bigint), _col2 (type: bigint)
         Reducer 3 
@@ -1049,7 +1047,7 @@ STAGE PLANS:
                         Reduce Sink Vectorization:
                             className: VectorReduceSinkMultiKeyOperator
                             native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 1000 Data size: 88276 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col2 (type: int)
             Execution mode: vectorized, llap
@@ -1112,7 +1110,7 @@ STAGE PLANS:
                       Reduce Sink Vectorization:
                           className: VectorReduceSinkObjectHashOperator
                           native: true
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                       Statistics: Num rows: 250 Data size: 22069 Basic stats: COMPLETE Column stats: NONE
                       value expressions: _col2 (type: bigint)
         Reducer 3 

http://git-wip-us.apache.org/repos/asf/hive/blob/ec8c390e/ql/src/test/results/clientpositive/llap/vector_grouping_sets.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_grouping_sets.q.out b/ql/src/test/results/clientpositive/llap/vector_grouping_sets.q.out
index 9d51982..92a6a6c 100644
--- a/ql/src/test/results/clientpositive/llap/vector_grouping_sets.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_grouping_sets.q.out
@@ -183,7 +183,7 @@ STAGE PLANS:
                         Reduce Sink Vectorization:
                             className: VectorReduceSinkMultiKeyOperator
                             native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 24 Data size: 51264 Basic stats: COMPLETE Column stats: NONE
             Execution mode: vectorized, llap
             LLAP IO: all inputs
@@ -313,7 +313,7 @@ STAGE PLANS:
                         Reduce Sink Vectorization:
                             className: VectorReduceSinkMultiKeyOperator
                             native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 24 Data size: 51264 Basic stats: COMPLETE Column stats: NONE
             Execution mode: vectorized, llap
             LLAP IO: all inputs

http://git-wip-us.apache.org/repos/asf/hive/blob/ec8c390e/ql/src/test/results/clientpositive/llap/vector_if_expr.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_if_expr.q.out b/ql/src/test/results/clientpositive/llap/vector_if_expr.q.out
index f06086e..32d1001 100644
--- a/ql/src/test/results/clientpositive/llap/vector_if_expr.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_if_expr.q.out
@@ -50,7 +50,7 @@ STAGE PLANS:
                         Reduce Sink Vectorization:
                             className: VectorReduceSinkObjectHashOperator
                             native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 4587 Data size: 857712 Basic stats: COMPLETE Column stats: COMPLETE
                         value expressions: _col1 (type: string)
             Execution mode: vectorized, llap

http://git-wip-us.apache.org/repos/asf/hive/blob/ec8c390e/ql/src/test/results/clientpositive/llap/vector_include_no_sel.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_include_no_sel.q.out b/ql/src/test/results/clientpositive/llap/vector_include_no_sel.q.out
index 029b3f5..efd49cd 100644
--- a/ql/src/test/results/clientpositive/llap/vector_include_no_sel.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_include_no_sel.q.out
@@ -203,7 +203,7 @@ STAGE PLANS:
                     Reduce Sink Vectorization:
                         className: VectorReduceSinkOperator
                         native: false
-                        nativeConditionsMet: hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                        nativeConditionsMet: hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         nativeConditionsNotMet: hive.vectorized.execution.reducesink.new.enabled IS false
                     Statistics: Num rows: 200 Data size: 74200 Basic stats: COMPLETE Column stats: NONE
                     value expressions: cd_demo_sk (type: int), cd_marital_status (type: string)
@@ -268,7 +268,7 @@ STAGE PLANS:
                             Reduce Sink Vectorization:
                                 className: VectorReduceSinkOperator
                                 native: false
-                                nativeConditionsMet: hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                                nativeConditionsMet: hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                                 nativeConditionsNotMet: hive.vectorized.execution.reducesink.new.enabled IS false
                             Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                             value expressions: _col0 (type: bigint)

http://git-wip-us.apache.org/repos/asf/hive/blob/ec8c390e/ql/src/test/results/clientpositive/llap/vector_inner_join.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_inner_join.q.out b/ql/src/test/results/clientpositive/llap/vector_inner_join.q.out
index fc7b9a3..a1c5f81 100644
--- a/ql/src/test/results/clientpositive/llap/vector_inner_join.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_inner_join.q.out
@@ -153,7 +153,7 @@ STAGE PLANS:
                         Reduce Sink Vectorization:
                             className: VectorReduceSinkLongOperator
                             native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE
             Execution mode: vectorized, llap
             LLAP IO: all inputs
@@ -317,7 +317,7 @@ STAGE PLANS:
                           Reduce Sink Vectorization:
                               className: VectorReduceSinkLongOperator
                               native: true
-                              nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                              nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                           Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE
             Execution mode: vectorized, llap
             LLAP IO: all inputs
@@ -511,7 +511,7 @@ STAGE PLANS:
                         Reduce Sink Vectorization:
                             className: VectorReduceSinkLongOperator
                             native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 1 Data size: 91 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col0 (type: string)
             Execution mode: vectorized, llap
@@ -599,7 +599,7 @@ STAGE PLANS:
                         Reduce Sink Vectorization:
                             className: VectorReduceSinkLongOperator
                             native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 1 Data size: 91 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col0 (type: string)
             Execution mode: vectorized, llap
@@ -832,7 +832,7 @@ STAGE PLANS:
                         Reduce Sink Vectorization:
                             className: VectorReduceSinkLongOperator
                             native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 1 Data size: 91 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col0 (type: string)
             Execution mode: vectorized, llap
@@ -996,7 +996,7 @@ STAGE PLANS:
                         Reduce Sink Vectorization:
                             className: VectorReduceSinkLongOperator
                             native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 1 Data size: 91 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col0 (type: string)
             Execution mode: vectorized, llap
@@ -1160,7 +1160,7 @@ STAGE PLANS:
                         Reduce Sink Vectorization:
                             className: VectorReduceSinkLongOperator
                             native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 1 Data size: 91 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col0 (type: string)
             Execution mode: vectorized, llap
@@ -1248,7 +1248,7 @@ STAGE PLANS:
                         Reduce Sink Vectorization:
                             className: VectorReduceSinkLongOperator
                             native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 1 Data size: 91 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col0 (type: string)
             Execution mode: vectorized, llap
@@ -1412,7 +1412,7 @@ STAGE PLANS:
                         Reduce Sink Vectorization:
                             className: VectorReduceSinkLongOperator
                             native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 1 Data size: 91 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col0 (type: string)
             Execution mode: vectorized, llap

http://git-wip-us.apache.org/repos/asf/hive/blob/ec8c390e/ql/src/test/results/clientpositive/llap/vector_interval_1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_interval_1.q.out b/ql/src/test/results/clientpositive/llap/vector_interval_1.q.out
index debf5ab..8d4f12e 100644
--- a/ql/src/test/results/clientpositive/llap/vector_interval_1.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_interval_1.q.out
@@ -91,7 +91,7 @@ STAGE PLANS:
                       Reduce Sink Vectorization:
                           className: VectorReduceSinkObjectHashOperator
                           native: true
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                       Statistics: Num rows: 2 Data size: 442 Basic stats: COMPLETE Column stats: NONE
                       value expressions: _col2 (type: interval_year_month), _col4 (type: interval_day_time)
             Execution mode: vectorized, llap
@@ -219,7 +219,7 @@ STAGE PLANS:
                       Reduce Sink Vectorization:
                           className: VectorReduceSinkObjectHashOperator
                           native: true
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                       Statistics: Num rows: 2 Data size: 442 Basic stats: COMPLETE Column stats: NONE
                       value expressions: _col2 (type: interval_year_month), _col3 (type: interval_year_month), _col5 (type: interval_year_month), _col6 (type: interval_year_month)
             Execution mode: vectorized, llap
@@ -355,7 +355,7 @@ STAGE PLANS:
                       Reduce Sink Vectorization:
                           className: VectorReduceSinkObjectHashOperator
                           native: true
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                       Statistics: Num rows: 2 Data size: 442 Basic stats: COMPLETE Column stats: NONE
                       value expressions: _col2 (type: interval_day_time), _col3 (type: interval_day_time), _col5 (type: interval_day_time), _col6 (type: interval_day_time)
             Execution mode: vectorized, llap
@@ -503,7 +503,7 @@ STAGE PLANS:
                       Reduce Sink Vectorization:
                           className: VectorReduceSinkObjectHashOperator
                           native: true
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                       Statistics: Num rows: 2 Data size: 442 Basic stats: COMPLETE Column stats: NONE
                       value expressions: _col1 (type: date), _col2 (type: date), _col3 (type: date), _col4 (type: date), _col5 (type: date), _col6 (type: date), _col7 (type: timestamp), _col8 (type: timestamp), _col9 (type: timestamp), _col10 (type: timestamp), _col11 (type: timestamp), _col12 (type: timestamp)
             Execution mode: vectorized, llap
@@ -662,7 +662,7 @@ STAGE PLANS:
                       Reduce Sink Vectorization:
                           className: VectorReduceSinkObjectHashOperator
                           native: true
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                       Statistics: Num rows: 2 Data size: 442 Basic stats: COMPLETE Column stats: NONE
                       value expressions: _col1 (type: timestamp), _col2 (type: timestamp), _col3 (type: timestamp), _col4 (type: timestamp), _col5 (type: timestamp), _col6 (type: timestamp), _col7 (type: timestamp), _col8 (type: timestamp), _col9 (type: timestamp), _col10 (type: timestamp), _col11 (type: timestamp), _col12 (type: timestamp)
             Execution mode: vectorized, llap
@@ -803,7 +803,7 @@ STAGE PLANS:
                       Reduce Sink Vectorization:
                           className: VectorReduceSinkObjectHashOperator
                           native: true
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                       Statistics: Num rows: 2 Data size: 442 Basic stats: COMPLETE Column stats: NONE
                       value expressions: _col1 (type: interval_day_time), _col2 (type: interval_day_time), _col3 (type: interval_day_time)
             Execution mode: vectorized, llap
@@ -926,7 +926,7 @@ STAGE PLANS:
                       Reduce Sink Vectorization:
                           className: VectorReduceSinkObjectHashOperator
                           native: true
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                       Statistics: Num rows: 2 Data size: 442 Basic stats: COMPLETE Column stats: NONE
                       value expressions: _col1 (type: interval_day_time), _col2 (type: interval_day_time), _col3 (type: interval_day_time)
             Execution mode: vectorized, llap
@@ -1055,7 +1055,7 @@ STAGE PLANS:
                       Reduce Sink Vectorization:
                           className: VectorReduceSinkObjectHashOperator
                           native: true
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                       Statistics: Num rows: 2 Data size: 442 Basic stats: COMPLETE Column stats: NONE
                       value expressions: _col1 (type: interval_day_time), _col2 (type: interval_day_time), _col3 (type: interval_day_time), _col4 (type: interval_day_time), _col5 (type: interval_day_time), _col6 (type: interval_day_time)
             Execution mode: vectorized, llap


[22/50] [abbrv] hive git commit: HIVE-16275: Vectorization: Add ReduceSink support for TopN (in specialized native classes) (Matt McCline, reviewed by Gopal Vijayaraghavan)

Posted by we...@apache.org.
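
----------------------------------------------------------------------
Context for the golden-file changes in this part: the hunks above and below show the
native ReduceSink condition text changing from "No TopN IS true" to "No PTF TopN IS true",
and in vector_groupby_reduce.q.out the operator flips from the row-mode
VectorReduceSinkOperator (native: false, "nativeConditionsNotMet: No TopN IS false") to the
specialized VectorReduceSinkLongOperator / VectorReduceSinkObjectHashOperator (native: true).
In other words, after HIVE-16275 a plain LIMIT-style TopN no longer disqualifies the native
specialized classes; only a PTF TopN does. The following is a minimal, hypothetical sketch of
that kind of condition check -- it is NOT Hive's actual planner code, and the class and field
names are invented for illustration only.

    // Hypothetical sketch only; not the real org.apache.hadoop.hive.ql.optimizer code.
    import java.util.ArrayList;
    import java.util.List;

    public class ReduceSinkNativeCheck {

        /** Invented stand-in for the properties a planner might inspect. */
        static class ReduceSinkInfo {
            boolean newReduceSinkEnabled; // hive.vectorized.execution.reducesink.new.enabled
            boolean tezOrSpark;           // hive.execution.engine in [tez, spark]
            boolean hasTopN;              // a TopN (LIMIT push-down) hash is attached
            boolean isPtfTopN;            // the TopN comes from a PTF/windowing operator
            boolean hasDistinctColumns;
            boolean binarySortableKeys;
            boolean lazyBinaryValues;
        }

        /** Returns the unmet conditions; an empty list means a specialized native operator can be used. */
        static List<String> unmetNativeConditions(ReduceSinkInfo info) {
            List<String> notMet = new ArrayList<>();
            if (!info.newReduceSinkEnabled) {
                notMet.add("hive.vectorized.execution.reducesink.new.enabled IS false");
            }
            if (!info.tezOrSpark) {
                notMet.add("hive.execution.engine IN [tez, spark] IS false");
            }
            // Before HIVE-16275 the gate was effectively "No TopN": any TopN forced the
            // non-native VectorReduceSinkOperator. With the change, only a PTF TopN blocks
            // the native path, so an ordinary LIMIT TopN keeps the specialized classes.
            if (info.hasTopN && info.isPtfTopN) {
                notMet.add("No PTF TopN IS false");
            }
            if (info.hasDistinctColumns) {
                notMet.add("No DISTINCT columns IS false");
            }
            if (!info.binarySortableKeys) {
                notMet.add("BinarySortableSerDe for keys IS false");
            }
            if (!info.lazyBinaryValues) {
                notMet.add("LazyBinarySerDe for values IS false");
            }
            return notMet;
        }

        public static void main(String[] args) {
            ReduceSinkInfo info = new ReduceSinkInfo();
            info.newReduceSinkEnabled = true;
            info.tezOrSpark = true;
            info.hasTopN = true;      // e.g. GROUP BY ... LIMIT 20
            info.isPtfTopN = false;   // not a windowing/PTF TopN
            info.binarySortableKeys = true;
            info.lazyBinaryValues = true;
            // Prints []: all conditions met, so the plan can keep a specialized native
            // reduce sink instead of falling back to the row-mode operator.
            System.out.println(unmetNativeConditions(info));
        }
    }

Under these (assumed) semantics, the vector_groupby_reduce.q.out hunk above is exactly the
case the sketch's main() models: a TopN that is not PTF-driven now satisfies the native
conditions, which is why its plan output gained "native: true" and a specialized class name.
----------------------------------------------------------------------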
http://git-wip-us.apache.org/repos/asf/hive/blob/ec8c390e/ql/src/test/results/clientpositive/vector_decimal_aggregate.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_decimal_aggregate.q.out b/ql/src/test/results/clientpositive/vector_decimal_aggregate.q.out
index 8268869..34c60c0 100644
--- a/ql/src/test/results/clientpositive/vector_decimal_aggregate.q.out
+++ b/ql/src/test/results/clientpositive/vector_decimal_aggregate.q.out
@@ -80,7 +80,7 @@ STAGE PLANS:
                   Reduce Sink Vectorization:
                       className: VectorReduceSinkOperator
                       native: false
-                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                       nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false
                   Statistics: Num rows: 12288 Data size: 2165060 Basic stats: COMPLETE Column stats: NONE
                   value expressions: _col1 (type: bigint), _col2 (type: decimal(20,10)), _col3 (type: decimal(20,10)), _col4 (type: decimal(30,10)), _col5 (type: bigint), _col6 (type: decimal(23,14)), _col7 (type: decimal(23,14)), _col8 (type: decimal(33,14)), _col9 (type: bigint)

http://git-wip-us.apache.org/repos/asf/hive/blob/ec8c390e/ql/src/test/results/clientpositive/vector_decimal_expressions.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_decimal_expressions.q.out b/ql/src/test/results/clientpositive/vector_decimal_expressions.q.out
index 627acfd..56d2e1f 100644
--- a/ql/src/test/results/clientpositive/vector_decimal_expressions.q.out
+++ b/ql/src/test/results/clientpositive/vector_decimal_expressions.q.out
@@ -59,8 +59,8 @@ STAGE PLANS:
                   Reduce Sink Vectorization:
                       className: VectorReduceSinkOperator
                       native: false
-                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                      nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false, No TopN IS false
+                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                      nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false
                   Statistics: Num rows: 455 Data size: 78809 Basic stats: COMPLETE Column stats: NONE
                   TopN Hash Memory Usage: 0.1
       Execution mode: vectorized

http://git-wip-us.apache.org/repos/asf/hive/blob/ec8c390e/ql/src/test/results/clientpositive/vector_decimal_round.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_decimal_round.q.out b/ql/src/test/results/clientpositive/vector_decimal_round.q.out
index de49c17..d92b6c2 100644
--- a/ql/src/test/results/clientpositive/vector_decimal_round.q.out
+++ b/ql/src/test/results/clientpositive/vector_decimal_round.q.out
@@ -67,7 +67,7 @@ STAGE PLANS:
                 Reduce Sink Vectorization:
                     className: VectorReduceSinkOperator
                     native: false
-                    nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                    nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                     nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false
                 Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE
                 value expressions: _col1 (type: decimal(11,0))
@@ -150,7 +150,7 @@ STAGE PLANS:
                 Reduce Sink Vectorization:
                     className: VectorReduceSinkOperator
                     native: false
-                    nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                    nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                     nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false
                 Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE
                 value expressions: _col0 (type: decimal(10,0))
@@ -418,7 +418,7 @@ STAGE PLANS:
                 Reduce Sink Vectorization:
                     className: VectorReduceSinkOperator
                     native: false
-                    nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                    nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                     nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false
                 Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE
                 value expressions: _col1 (type: decimal(11,0))
@@ -501,7 +501,7 @@ STAGE PLANS:
                 Reduce Sink Vectorization:
                     className: VectorReduceSinkOperator
                     native: false
-                    nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                    nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                     nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false
                 Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE
                 value expressions: _col0 (type: decimal(10,0))

http://git-wip-us.apache.org/repos/asf/hive/blob/ec8c390e/ql/src/test/results/clientpositive/vector_decimal_round_2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_decimal_round_2.q.out b/ql/src/test/results/clientpositive/vector_decimal_round_2.q.out
index 5d1d05e..535448a 100644
--- a/ql/src/test/results/clientpositive/vector_decimal_round_2.q.out
+++ b/ql/src/test/results/clientpositive/vector_decimal_round_2.q.out
@@ -71,7 +71,7 @@ STAGE PLANS:
                 Reduce Sink Vectorization:
                     className: VectorReduceSinkOperator
                     native: false
-                    nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                    nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                     nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false
                 Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE
                 value expressions: _col1 (type: decimal(21,0)), _col2 (type: decimal(22,1)), _col3 (type: decimal(23,2)), _col4 (type: decimal(24,3)), _col5 (type: decimal(21,0)), _col6 (type: decimal(21,0)), _col7 (type: decimal(21,0)), _col8 (type: decimal(21,0)), _col9 (type: decimal(21,0)), _col10 (type: decimal(21,0)), _col11 (type: decimal(21,0)), _col12 (type: decimal(21,0))
@@ -204,7 +204,7 @@ STAGE PLANS:
                 Reduce Sink Vectorization:
                     className: VectorReduceSinkOperator
                     native: false
-                    nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                    nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                     nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false
                 Statistics: Num rows: 1 Data size: 224 Basic stats: COMPLETE Column stats: NONE
                 value expressions: _col1 (type: decimal(21,0)), _col2 (type: decimal(22,1)), _col3 (type: decimal(23,2)), _col4 (type: decimal(24,3)), _col5 (type: decimal(25,4)), _col6 (type: decimal(21,0)), _col7 (type: decimal(21,0)), _col8 (type: decimal(21,0)), _col9 (type: decimal(21,0)), _col10 (type: decimal(21,0)), _col11 (type: decimal(21,0)), _col12 (type: decimal(22,1)), _col13 (type: decimal(23,2)), _col14 (type: decimal(24,3)), _col15 (type: decimal(25,4)), _col16 (type: decimal(21,0)), _col17 (type: decimal(21,0)), _col18 (type: decimal(21,0)), _col19 (type: decimal(21,0))
@@ -364,7 +364,7 @@ STAGE PLANS:
                 Reduce Sink Vectorization:
                     className: VectorReduceSinkOperator
                     native: false
-                    nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                    nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                     nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false
                 Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE
                 value expressions: _col1 (type: decimal(21,0)), _col2 (type: decimal(21,0)), _col3 (type: decimal(21,0)), _col4 (type: decimal(21,0)), _col5 (type: decimal(21,0)), _col6 (type: decimal(21,0)), _col7 (type: decimal(21,0)), _col8 (type: decimal(21,0)), _col9 (type: decimal(21,0)), _col10 (type: decimal(21,0)), _col11 (type: decimal(21,0)), _col12 (type: decimal(21,0)), _col13 (type: decimal(21,0)), _col14 (type: decimal(21,0)), _col15 (type: decimal(21,0)), _col16 (type: decimal(21,0)), _col17 (type: decimal(22,1)), _col18 (type: decimal(23,2)), _col19 (type: decimal(24,3)), _col20 (type: decimal(25,4)), _col21 (type: decimal(26,5)), _col22 (type: decimal(27,6)), _col23 (type: decimal(28,7)), _col24 (type: decimal(29,8)), _col25 (type: decimal(30,9)), _col26 (type: decimal(31,10)), _col27 (type: decimal(32,11)), _col28 (type: decimal(33,12)), _col29 (type: decimal(34,13)), _col31 (type: decimal(35,14)), _col32 (type: decimal(36,15)), _col33 (type: decimal(37,16))
@@ -513,7 +513,7 @@ STAGE PLANS:
                 Reduce Sink Vectorization:
                     className: VectorReduceSinkOperator
                     native: false
-                    nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                    nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                     nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false
                 Statistics: Num rows: 1 Data size: 224 Basic stats: COMPLETE Column stats: NONE
                 value expressions: _col1 (type: decimal(30,9))

http://git-wip-us.apache.org/repos/asf/hive/blob/ec8c390e/ql/src/test/results/clientpositive/vector_distinct_2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_distinct_2.q.out b/ql/src/test/results/clientpositive/vector_distinct_2.q.out
index 4bf3f0f..b6e9527 100644
--- a/ql/src/test/results/clientpositive/vector_distinct_2.q.out
+++ b/ql/src/test/results/clientpositive/vector_distinct_2.q.out
@@ -151,7 +151,7 @@ STAGE PLANS:
                   Reduce Sink Vectorization:
                       className: VectorReduceSinkOperator
                       native: false
-                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                       nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false
                   Statistics: Num rows: 2000 Data size: 918712 Basic stats: COMPLETE Column stats: NONE
       Execution mode: vectorized

http://git-wip-us.apache.org/repos/asf/hive/blob/ec8c390e/ql/src/test/results/clientpositive/vector_empty_where.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_empty_where.q.out b/ql/src/test/results/clientpositive/vector_empty_where.q.out
index a1fb19d..b2dec6d 100644
--- a/ql/src/test/results/clientpositive/vector_empty_where.q.out
+++ b/ql/src/test/results/clientpositive/vector_empty_where.q.out
@@ -56,7 +56,7 @@ STAGE PLANS:
                     Reduce Sink Vectorization:
                         className: VectorReduceSinkOperator
                         native: false
-                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No TopN IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No PTF TopN IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false, No DISTINCT columns IS false
                     Statistics: Num rows: 6144 Data size: 1320982 Basic stats: COMPLETE Column stats: NONE
       Execution mode: vectorized
@@ -155,7 +155,7 @@ STAGE PLANS:
                   Reduce Sink Vectorization:
                       className: VectorReduceSinkOperator
                       native: false
-                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No TopN IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No PTF TopN IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                       nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false, No DISTINCT columns IS false
                   Statistics: Num rows: 6144 Data size: 1320982 Basic stats: COMPLETE Column stats: NONE
       Execution mode: vectorized
@@ -262,7 +262,7 @@ STAGE PLANS:
                     Reduce Sink Vectorization:
                         className: VectorReduceSinkOperator
                         native: false
-                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No TopN IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No PTF TopN IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false, No DISTINCT columns IS false
                     Statistics: Num rows: 6144 Data size: 1320982 Basic stats: COMPLETE Column stats: NONE
       Execution mode: vectorized
@@ -369,7 +369,7 @@ STAGE PLANS:
                     Reduce Sink Vectorization:
                         className: VectorReduceSinkOperator
                         native: false
-                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No TopN IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No PTF TopN IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false, No DISTINCT columns IS false
                     Statistics: Num rows: 6144 Data size: 1320982 Basic stats: COMPLETE Column stats: NONE
       Execution mode: vectorized

http://git-wip-us.apache.org/repos/asf/hive/blob/ec8c390e/ql/src/test/results/clientpositive/vector_groupby4.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_groupby4.q.out b/ql/src/test/results/clientpositive/vector_groupby4.q.out
index 799797d..9de8e6e 100644
--- a/ql/src/test/results/clientpositive/vector_groupby4.q.out
+++ b/ql/src/test/results/clientpositive/vector_groupby4.q.out
@@ -62,7 +62,7 @@ STAGE PLANS:
                 Reduce Sink Vectorization:
                     className: VectorReduceSinkOperator
                     native: false
-                    nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                    nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                     nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false
                 Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: NONE
       Execution mode: vectorized
@@ -109,7 +109,7 @@ STAGE PLANS:
               Reduce Sink Vectorization:
                   className: VectorReduceSinkOperator
                   native: false
-                  nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                  nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                   nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false
               Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: NONE
       Execution mode: vectorized

http://git-wip-us.apache.org/repos/asf/hive/blob/ec8c390e/ql/src/test/results/clientpositive/vector_groupby6.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_groupby6.q.out b/ql/src/test/results/clientpositive/vector_groupby6.q.out
index 6fee467..25cf5b2 100644
--- a/ql/src/test/results/clientpositive/vector_groupby6.q.out
+++ b/ql/src/test/results/clientpositive/vector_groupby6.q.out
@@ -62,7 +62,7 @@ STAGE PLANS:
                 Reduce Sink Vectorization:
                     className: VectorReduceSinkOperator
                     native: false
-                    nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                    nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                     nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false
                 Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: NONE
       Execution mode: vectorized
@@ -109,7 +109,7 @@ STAGE PLANS:
               Reduce Sink Vectorization:
                   className: VectorReduceSinkOperator
                   native: false
-                  nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                  nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                   nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false
               Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: NONE
       Execution mode: vectorized

http://git-wip-us.apache.org/repos/asf/hive/blob/ec8c390e/ql/src/test/results/clientpositive/vector_groupby_3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_groupby_3.q.out b/ql/src/test/results/clientpositive/vector_groupby_3.q.out
index f4be253..9a1256b 100644
--- a/ql/src/test/results/clientpositive/vector_groupby_3.q.out
+++ b/ql/src/test/results/clientpositive/vector_groupby_3.q.out
@@ -153,7 +153,7 @@ STAGE PLANS:
                   Reduce Sink Vectorization:
                       className: VectorReduceSinkOperator
                       native: false
-                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                       nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false
                   Statistics: Num rows: 2000 Data size: 918712 Basic stats: COMPLETE Column stats: NONE
                   value expressions: _col2 (type: bigint)

http://git-wip-us.apache.org/repos/asf/hive/blob/ec8c390e/ql/src/test/results/clientpositive/vector_groupby_mapjoin.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_groupby_mapjoin.q.out b/ql/src/test/results/clientpositive/vector_groupby_mapjoin.q.out
index df1d435..addbdeb 100644
--- a/ql/src/test/results/clientpositive/vector_groupby_mapjoin.q.out
+++ b/ql/src/test/results/clientpositive/vector_groupby_mapjoin.q.out
@@ -63,7 +63,7 @@ STAGE PLANS:
                   Reduce Sink Vectorization:
                       className: VectorReduceSinkOperator
                       native: false
-                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                       nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false
                   Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
                   value expressions: _col0 (type: bigint), _col1 (type: bigint)
@@ -244,7 +244,7 @@ STAGE PLANS:
               Reduce Sink Vectorization:
                   className: VectorReduceSinkOperator
                   native: false
-                  nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                  nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                   nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false
               Statistics: Num rows: 366 Data size: 10110 Basic stats: COMPLETE Column stats: NONE
               value expressions: _col1 (type: string)
@@ -349,7 +349,7 @@ STAGE PLANS:
                   Reduce Sink Vectorization:
                       className: VectorReduceSinkOperator
                       native: false
-                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                       nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
       Execution mode: vectorized

http://git-wip-us.apache.org/repos/asf/hive/blob/ec8c390e/ql/src/test/results/clientpositive/vector_groupby_reduce.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_groupby_reduce.q.out b/ql/src/test/results/clientpositive/vector_groupby_reduce.q.out
index 43ce596..bc59510 100644
--- a/ql/src/test/results/clientpositive/vector_groupby_reduce.q.out
+++ b/ql/src/test/results/clientpositive/vector_groupby_reduce.q.out
@@ -274,8 +274,8 @@ STAGE PLANS:
                   Reduce Sink Vectorization:
                       className: VectorReduceSinkOperator
                       native: false
-                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                      nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false, No TopN IS false
+                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                      nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false
                   Statistics: Num rows: 1000 Data size: 88276 Basic stats: COMPLETE Column stats: NONE
                   TopN Hash Memory Usage: 0.1
       Execution mode: vectorized
@@ -321,8 +321,8 @@ STAGE PLANS:
               Reduce Sink Vectorization:
                   className: VectorReduceSinkOperator
                   native: false
-                  nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                  nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false, No TopN IS false
+                  nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                  nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false
               Statistics: Num rows: 500 Data size: 44138 Basic stats: COMPLETE Column stats: NONE
               TopN Hash Memory Usage: 0.1
       Execution mode: vectorized
@@ -469,7 +469,7 @@ STAGE PLANS:
                   Reduce Sink Vectorization:
                       className: VectorReduceSinkOperator
                       native: false
-                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                       nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false
                   Statistics: Num rows: 1000 Data size: 88276 Basic stats: COMPLETE Column stats: NONE
       Execution mode: vectorized
@@ -529,7 +529,7 @@ STAGE PLANS:
               Reduce Sink Vectorization:
                   className: VectorReduceSinkOperator
                   native: false
-                  nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                  nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                   nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false
               Statistics: Num rows: 250 Data size: 22069 Basic stats: COMPLETE Column stats: NONE
       Execution mode: vectorized
@@ -743,7 +743,7 @@ STAGE PLANS:
                   Reduce Sink Vectorization:
                       className: VectorReduceSinkOperator
                       native: false
-                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                       nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false
                   Statistics: Num rows: 1000 Data size: 88276 Basic stats: COMPLETE Column stats: NONE
                   value expressions: _col2 (type: int)
@@ -805,7 +805,7 @@ STAGE PLANS:
               Reduce Sink Vectorization:
                   className: VectorReduceSinkOperator
                   native: false
-                  nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                  nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                   nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false
               Statistics: Num rows: 250 Data size: 22069 Basic stats: COMPLETE Column stats: NONE
               value expressions: _col1 (type: bigint), _col2 (type: bigint)
@@ -1020,7 +1020,7 @@ STAGE PLANS:
                   Reduce Sink Vectorization:
                       className: VectorReduceSinkOperator
                       native: false
-                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                       nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false
                   Statistics: Num rows: 1000 Data size: 88276 Basic stats: COMPLETE Column stats: NONE
                   value expressions: _col2 (type: int)
@@ -1082,7 +1082,7 @@ STAGE PLANS:
               Reduce Sink Vectorization:
                   className: VectorReduceSinkOperator
                   native: false
-                  nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                  nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                   nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false
               Statistics: Num rows: 250 Data size: 22069 Basic stats: COMPLETE Column stats: NONE
               value expressions: _col2 (type: bigint)

http://git-wip-us.apache.org/repos/asf/hive/blob/ec8c390e/ql/src/test/results/clientpositive/vector_grouping_sets.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_grouping_sets.q.out b/ql/src/test/results/clientpositive/vector_grouping_sets.q.out
index a4199ed..3d35fbf 100644
--- a/ql/src/test/results/clientpositive/vector_grouping_sets.q.out
+++ b/ql/src/test/results/clientpositive/vector_grouping_sets.q.out
@@ -177,7 +177,7 @@ STAGE PLANS:
                   Reduce Sink Vectorization:
                       className: VectorReduceSinkOperator
                       native: false
-                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                       nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false
                   Statistics: Num rows: 24 Data size: 51264 Basic stats: COMPLETE Column stats: NONE
       Execution mode: vectorized
@@ -291,7 +291,7 @@ STAGE PLANS:
                   Reduce Sink Vectorization:
                       className: VectorReduceSinkOperator
                       native: false
-                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                       nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false
                   Statistics: Num rows: 24 Data size: 51264 Basic stats: COMPLETE Column stats: NONE
       Execution mode: vectorized

http://git-wip-us.apache.org/repos/asf/hive/blob/ec8c390e/ql/src/test/results/clientpositive/vector_if_expr.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_if_expr.q.out b/ql/src/test/results/clientpositive/vector_if_expr.q.out
index 043940b..2f1cf0a 100644
--- a/ql/src/test/results/clientpositive/vector_if_expr.q.out
+++ b/ql/src/test/results/clientpositive/vector_if_expr.q.out
@@ -44,7 +44,7 @@ STAGE PLANS:
                   Reduce Sink Vectorization:
                       className: VectorReduceSinkOperator
                       native: false
-                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                       nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false
                   Statistics: Num rows: 6144 Data size: 1320982 Basic stats: COMPLETE Column stats: NONE
                   value expressions: _col1 (type: string)

http://git-wip-us.apache.org/repos/asf/hive/blob/ec8c390e/ql/src/test/results/clientpositive/vector_include_no_sel.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_include_no_sel.q.out b/ql/src/test/results/clientpositive/vector_include_no_sel.q.out
index e5e3a52..8c8ef80 100644
--- a/ql/src/test/results/clientpositive/vector_include_no_sel.q.out
+++ b/ql/src/test/results/clientpositive/vector_include_no_sel.q.out
@@ -250,7 +250,7 @@ STAGE PLANS:
                       Reduce Sink Vectorization:
                           className: VectorReduceSinkOperator
                           native: false
-                          nativeConditionsMet: No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                          nativeConditionsMet: No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                           nativeConditionsNotMet: hive.vectorized.execution.reducesink.new.enabled IS false, hive.execution.engine mr IN [tez, spark] IS false
                       Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                       value expressions: _col0 (type: bigint)

http://git-wip-us.apache.org/repos/asf/hive/blob/ec8c390e/ql/src/test/results/clientpositive/vector_interval_1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_interval_1.q.out b/ql/src/test/results/clientpositive/vector_interval_1.q.out
index f53a2c2..2a398ae 100644
--- a/ql/src/test/results/clientpositive/vector_interval_1.q.out
+++ b/ql/src/test/results/clientpositive/vector_interval_1.q.out
@@ -85,7 +85,7 @@ STAGE PLANS:
                 Reduce Sink Vectorization:
                     className: VectorReduceSinkOperator
                     native: false
-                    nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                    nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                     nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false
                 Statistics: Num rows: 2 Data size: 442 Basic stats: COMPLETE Column stats: NONE
                 value expressions: _col2 (type: interval_year_month), _col4 (type: interval_day_time)
@@ -194,7 +194,7 @@ STAGE PLANS:
                 Reduce Sink Vectorization:
                     className: VectorReduceSinkOperator
                     native: false
-                    nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                    nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                     nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false
                 Statistics: Num rows: 2 Data size: 442 Basic stats: COMPLETE Column stats: NONE
                 value expressions: _col2 (type: interval_year_month), _col3 (type: interval_year_month), _col5 (type: interval_year_month), _col6 (type: interval_year_month)
@@ -311,7 +311,7 @@ STAGE PLANS:
                 Reduce Sink Vectorization:
                     className: VectorReduceSinkOperator
                     native: false
-                    nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                    nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                     nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false
                 Statistics: Num rows: 2 Data size: 442 Basic stats: COMPLETE Column stats: NONE
                 value expressions: _col2 (type: interval_day_time), _col3 (type: interval_day_time), _col5 (type: interval_day_time), _col6 (type: interval_day_time)
@@ -440,7 +440,7 @@ STAGE PLANS:
                 Reduce Sink Vectorization:
                     className: VectorReduceSinkOperator
                     native: false
-                    nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                    nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                     nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false
                 Statistics: Num rows: 2 Data size: 442 Basic stats: COMPLETE Column stats: NONE
                 value expressions: _col1 (type: date), _col2 (type: date), _col3 (type: date), _col4 (type: date), _col5 (type: date), _col6 (type: date), _col7 (type: timestamp), _col8 (type: timestamp), _col9 (type: timestamp), _col10 (type: timestamp), _col11 (type: timestamp), _col12 (type: timestamp)
@@ -581,7 +581,7 @@ STAGE PLANS:
                 Reduce Sink Vectorization:
                     className: VectorReduceSinkOperator
                     native: false
-                    nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                    nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                     nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false
                 Statistics: Num rows: 2 Data size: 442 Basic stats: COMPLETE Column stats: NONE
                 value expressions: _col1 (type: timestamp), _col2 (type: timestamp), _col3 (type: timestamp), _col4 (type: timestamp), _col5 (type: timestamp), _col6 (type: timestamp), _col7 (type: timestamp), _col8 (type: timestamp), _col9 (type: timestamp), _col10 (type: timestamp), _col11 (type: timestamp), _col12 (type: timestamp)
@@ -704,7 +704,7 @@ STAGE PLANS:
                 Reduce Sink Vectorization:
                     className: VectorReduceSinkOperator
                     native: false
-                    nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                    nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                     nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false
                 Statistics: Num rows: 2 Data size: 442 Basic stats: COMPLETE Column stats: NONE
                 value expressions: _col1 (type: interval_day_time), _col2 (type: interval_day_time), _col3 (type: interval_day_time)
@@ -809,7 +809,7 @@ STAGE PLANS:
                 Reduce Sink Vectorization:
                     className: VectorReduceSinkOperator
                     native: false
-                    nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                    nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                     nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false
                 Statistics: Num rows: 2 Data size: 442 Basic stats: COMPLETE Column stats: NONE
                 value expressions: _col1 (type: interval_day_time), _col2 (type: interval_day_time), _col3 (type: interval_day_time)
@@ -920,7 +920,7 @@ STAGE PLANS:
                 Reduce Sink Vectorization:
                     className: VectorReduceSinkOperator
                     native: false
-                    nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                    nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                     nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false
                 Statistics: Num rows: 2 Data size: 442 Basic stats: COMPLETE Column stats: NONE
                 value expressions: _col1 (type: interval_day_time), _col2 (type: interval_day_time), _col3 (type: interval_day_time), _col4 (type: interval_day_time), _col5 (type: interval_day_time), _col6 (type: interval_day_time)

http://git-wip-us.apache.org/repos/asf/hive/blob/ec8c390e/ql/src/test/results/clientpositive/vector_interval_arithmetic.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_interval_arithmetic.q.out b/ql/src/test/results/clientpositive/vector_interval_arithmetic.q.out
index 75250e3..b67231c 100644
--- a/ql/src/test/results/clientpositive/vector_interval_arithmetic.q.out
+++ b/ql/src/test/results/clientpositive/vector_interval_arithmetic.q.out
@@ -93,7 +93,7 @@ STAGE PLANS:
                 Reduce Sink Vectorization:
                     className: VectorReduceSinkOperator
                     native: false
-                    nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                    nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                     nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false
                 Statistics: Num rows: 50 Data size: 4800 Basic stats: COMPLETE Column stats: NONE
                 value expressions: _col1 (type: date), _col2 (type: date), _col3 (type: date), _col4 (type: date), _col5 (type: date), _col6 (type: date)
@@ -258,7 +258,7 @@ STAGE PLANS:
                 Reduce Sink Vectorization:
                     className: VectorReduceSinkOperator
                     native: false
-                    nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                    nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                     nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false
                 Statistics: Num rows: 50 Data size: 4800 Basic stats: COMPLETE Column stats: NONE
                 value expressions: _col1 (type: interval_day_time), _col2 (type: interval_day_time), _col3 (type: interval_day_time)
@@ -423,7 +423,7 @@ STAGE PLANS:
                 Reduce Sink Vectorization:
                     className: VectorReduceSinkOperator
                     native: false
-                    nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                    nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                     nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false
                 Statistics: Num rows: 50 Data size: 4800 Basic stats: COMPLETE Column stats: NONE
                 value expressions: _col1 (type: timestamp), _col2 (type: timestamp), _col3 (type: timestamp), _col4 (type: timestamp), _col5 (type: timestamp), _col6 (type: timestamp)
@@ -583,8 +583,8 @@ STAGE PLANS:
                 Reduce Sink Vectorization:
                     className: VectorReduceSinkOperator
                     native: false
-                    nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                    nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false, No TopN IS false
+                    nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                    nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false
                 Statistics: Num rows: 50 Data size: 800 Basic stats: COMPLETE Column stats: COMPLETE
                 TopN Hash Memory Usage: 0.1
       Execution mode: vectorized
@@ -701,7 +701,7 @@ STAGE PLANS:
                 Reduce Sink Vectorization:
                     className: VectorReduceSinkOperator
                     native: false
-                    nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                    nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                     nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false
                 Statistics: Num rows: 50 Data size: 4800 Basic stats: COMPLETE Column stats: NONE
                 value expressions: _col1 (type: timestamp), _col2 (type: timestamp), _col3 (type: timestamp), _col4 (type: timestamp), _col5 (type: timestamp), _col6 (type: timestamp)
@@ -868,7 +868,7 @@ STAGE PLANS:
                 Reduce Sink Vectorization:
                     className: VectorReduceSinkOperator
                     native: false
-                    nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                    nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                     nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false
                 Statistics: Num rows: 50 Data size: 4800 Basic stats: COMPLETE Column stats: NONE
                 value expressions: _col1 (type: timestamp), _col2 (type: interval_day_time), _col3 (type: interval_day_time), _col4 (type: interval_day_time)
@@ -1035,7 +1035,7 @@ STAGE PLANS:
                 Reduce Sink Vectorization:
                     className: VectorReduceSinkOperator
                     native: false
-                    nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                    nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                     nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false
                 Statistics: Num rows: 50 Data size: 4800 Basic stats: COMPLETE Column stats: NONE
                 value expressions: _col1 (type: timestamp), _col2 (type: timestamp), _col3 (type: timestamp), _col4 (type: timestamp), _col5 (type: timestamp), _col6 (type: timestamp)

http://git-wip-us.apache.org/repos/asf/hive/blob/ec8c390e/ql/src/test/results/clientpositive/vector_mapjoin_reduce.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_mapjoin_reduce.q.out b/ql/src/test/results/clientpositive/vector_mapjoin_reduce.q.out
index 3e96d10..82bef24 100644
--- a/ql/src/test/results/clientpositive/vector_mapjoin_reduce.q.out
+++ b/ql/src/test/results/clientpositive/vector_mapjoin_reduce.q.out
@@ -55,7 +55,7 @@ STAGE PLANS:
                   Reduce Sink Vectorization:
                       className: VectorReduceSinkOperator
                       native: false
-                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                       nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false
                   Statistics: Num rows: 100 Data size: 11999 Basic stats: COMPLETE Column stats: NONE
       Execution mode: vectorized
@@ -288,7 +288,7 @@ STAGE PLANS:
                   Reduce Sink Vectorization:
                       className: VectorReduceSinkOperator
                       native: false
-                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                       nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false
                   Statistics: Num rows: 100 Data size: 11999 Basic stats: COMPLETE Column stats: NONE
       Execution mode: vectorized

http://git-wip-us.apache.org/repos/asf/hive/blob/ec8c390e/ql/src/test/results/clientpositive/vector_non_string_partition.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_non_string_partition.q.out b/ql/src/test/results/clientpositive/vector_non_string_partition.q.out
index 018a193..1d13a65 100644
--- a/ql/src/test/results/clientpositive/vector_non_string_partition.q.out
+++ b/ql/src/test/results/clientpositive/vector_non_string_partition.q.out
@@ -70,8 +70,8 @@ STAGE PLANS:
                   Reduce Sink Vectorization:
                       className: VectorReduceSinkOperator
                       native: false
-                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                      nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false, No TopN IS false
+                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                      nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false
                   Statistics: Num rows: 1024 Data size: 113013 Basic stats: COMPLETE Column stats: NONE
                   TopN Hash Memory Usage: 0.1
                   value expressions: _col1 (type: tinyint)
@@ -173,8 +173,8 @@ STAGE PLANS:
                   Reduce Sink Vectorization:
                       className: VectorReduceSinkOperator
                       native: false
-                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                      nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false, No TopN IS false
+                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                      nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false
                   Statistics: Num rows: 1024 Data size: 113013 Basic stats: COMPLETE Column stats: NONE
                   TopN Hash Memory Usage: 0.1
       Execution mode: vectorized

http://git-wip-us.apache.org/repos/asf/hive/blob/ec8c390e/ql/src/test/results/clientpositive/vector_order_null.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_order_null.q.out b/ql/src/test/results/clientpositive/vector_order_null.q.out
index ca0ea30..d65b3ec 100644
--- a/ql/src/test/results/clientpositive/vector_order_null.q.out
+++ b/ql/src/test/results/clientpositive/vector_order_null.q.out
@@ -99,7 +99,7 @@ STAGE PLANS:
                 Reduce Sink Vectorization:
                     className: VectorReduceSinkOperator
                     native: false
-                    nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                    nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                     nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false
                 Statistics: Num rows: 6 Data size: 22 Basic stats: COMPLETE Column stats: NONE
       Execution mode: vectorized
@@ -193,7 +193,7 @@ STAGE PLANS:
                 Reduce Sink Vectorization:
                     className: VectorReduceSinkOperator
                     native: false
-                    nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                    nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                     nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false
                 Statistics: Num rows: 6 Data size: 22 Basic stats: COMPLETE Column stats: NONE
       Execution mode: vectorized
@@ -287,7 +287,7 @@ STAGE PLANS:
                 Reduce Sink Vectorization:
                     className: VectorReduceSinkOperator
                     native: false
-                    nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                    nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                     nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false
                 Statistics: Num rows: 6 Data size: 22 Basic stats: COMPLETE Column stats: NONE
       Execution mode: vectorized
@@ -381,7 +381,7 @@ STAGE PLANS:
                 Reduce Sink Vectorization:
                     className: VectorReduceSinkOperator
                     native: false
-                    nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                    nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                     nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false
                 Statistics: Num rows: 6 Data size: 22 Basic stats: COMPLETE Column stats: NONE
       Execution mode: vectorized
@@ -475,7 +475,7 @@ STAGE PLANS:
                 Reduce Sink Vectorization:
                     className: VectorReduceSinkOperator
                     native: false
-                    nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                    nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                     nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false
                 Statistics: Num rows: 6 Data size: 22 Basic stats: COMPLETE Column stats: NONE
       Execution mode: vectorized
@@ -569,7 +569,7 @@ STAGE PLANS:
                 Reduce Sink Vectorization:
                     className: VectorReduceSinkOperator
                     native: false
-                    nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                    nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                     nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false
                 Statistics: Num rows: 6 Data size: 22 Basic stats: COMPLETE Column stats: NONE
       Execution mode: vectorized
@@ -663,7 +663,7 @@ STAGE PLANS:
                 Reduce Sink Vectorization:
                     className: VectorReduceSinkOperator
                     native: false
-                    nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                    nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                     nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false
                 Statistics: Num rows: 6 Data size: 22 Basic stats: COMPLETE Column stats: NONE
       Execution mode: vectorized
@@ -757,7 +757,7 @@ STAGE PLANS:
                 Reduce Sink Vectorization:
                     className: VectorReduceSinkOperator
                     native: false
-                    nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                    nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                     nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false
                 Statistics: Num rows: 6 Data size: 22 Basic stats: COMPLETE Column stats: NONE
       Execution mode: vectorized
@@ -851,7 +851,7 @@ STAGE PLANS:
                 Reduce Sink Vectorization:
                     className: VectorReduceSinkOperator
                     native: false
-                    nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                    nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                     nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false
                 Statistics: Num rows: 6 Data size: 22 Basic stats: COMPLETE Column stats: NONE
       Execution mode: vectorized
@@ -945,7 +945,7 @@ STAGE PLANS:
                 Reduce Sink Vectorization:
                     className: VectorReduceSinkOperator
                     native: false
-                    nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                    nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                     nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false
                 Statistics: Num rows: 6 Data size: 22 Basic stats: COMPLETE Column stats: NONE
       Execution mode: vectorized
@@ -1039,7 +1039,7 @@ STAGE PLANS:
                 Reduce Sink Vectorization:
                     className: VectorReduceSinkOperator
                     native: false
-                    nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                    nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                     nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false
                 Statistics: Num rows: 6 Data size: 22 Basic stats: COMPLETE Column stats: NONE
       Execution mode: vectorized

http://git-wip-us.apache.org/repos/asf/hive/blob/ec8c390e/ql/src/test/results/clientpositive/vector_orderby_5.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_orderby_5.q.out b/ql/src/test/results/clientpositive/vector_orderby_5.q.out
index 7d27526..b85eb75 100644
--- a/ql/src/test/results/clientpositive/vector_orderby_5.q.out
+++ b/ql/src/test/results/clientpositive/vector_orderby_5.q.out
@@ -154,7 +154,7 @@ STAGE PLANS:
                   Reduce Sink Vectorization:
                       className: VectorReduceSinkOperator
                       native: false
-                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                       nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false
                   Statistics: Num rows: 2000 Data size: 918712 Basic stats: COMPLETE Column stats: NONE
                   value expressions: _col1 (type: bigint)
@@ -202,7 +202,7 @@ STAGE PLANS:
               Reduce Sink Vectorization:
                   className: VectorReduceSinkOperator
                   native: false
-                  nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                  nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                   nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false
               Statistics: Num rows: 1000 Data size: 459356 Basic stats: COMPLETE Column stats: NONE
               value expressions: _col1 (type: bigint)

http://git-wip-us.apache.org/repos/asf/hive/blob/ec8c390e/ql/src/test/results/clientpositive/vector_outer_join1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_outer_join1.q.out b/ql/src/test/results/clientpositive/vector_outer_join1.q.out
index 87ff0c4..dc2010d 100644
--- a/ql/src/test/results/clientpositive/vector_outer_join1.q.out
+++ b/ql/src/test/results/clientpositive/vector_outer_join1.q.out
@@ -699,7 +699,7 @@ STAGE PLANS:
                       Reduce Sink Vectorization:
                           className: VectorReduceSinkOperator
                           native: false
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                           nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false
                       Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
                       value expressions: _col0 (type: bigint), _col1 (type: bigint)

http://git-wip-us.apache.org/repos/asf/hive/blob/ec8c390e/ql/src/test/results/clientpositive/vector_outer_join2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_outer_join2.q.out b/ql/src/test/results/clientpositive/vector_outer_join2.q.out
index f8e757e..2d2c2b5 100644
--- a/ql/src/test/results/clientpositive/vector_outer_join2.q.out
+++ b/ql/src/test/results/clientpositive/vector_outer_join2.q.out
@@ -346,7 +346,7 @@ STAGE PLANS:
                       Reduce Sink Vectorization:
                           className: VectorReduceSinkOperator
                           native: false
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                           nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false
                       Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
                       value expressions: _col0 (type: bigint), _col1 (type: bigint)


[07/50] [abbrv] hive git commit: HIVE-15229: 'like any' and 'like all' operators in hive (Simanchal Das via Carl Steinbach)

Posted by we...@apache.org.
HIVE-15229: 'like any' and 'like all' operators in hive (Simanchal Das via Carl Steinbach)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/740779f6
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/740779f6
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/740779f6

Branch: refs/heads/hive-14535
Commit: 740779f66a4678324428ca0c240ae3ca44a00974
Parents: ed6501e
Author: Carl Steinbach <cw...@apache.org>
Authored: Wed May 3 14:46:31 2017 -0700
Committer: Carl Steinbach <cw...@apache.org>
Committed: Wed May 3 14:46:31 2017 -0700

----------------------------------------------------------------------
 .../hadoop/hive/ql/exec/FunctionRegistry.java   |   2 +
 .../org/apache/hadoop/hive/ql/parse/HiveLexer.g |   1 +
 .../apache/hadoop/hive/ql/parse/HiveParser.g    |   2 +
 .../hadoop/hive/ql/parse/IdentifiersParser.g    |   6 +
 .../hive/ql/parse/TypeCheckProcFactory.java     |   3 +
 .../hive/ql/udf/generic/GenericUDFLikeAll.java  | 133 +++++++++++++
 .../hive/ql/udf/generic/GenericUDFLikeAny.java  | 134 +++++++++++++
 .../ql/udf/generic/TestGenericUDFLikeAll.java   |  88 +++++++++
 .../ql/udf/generic/TestGenericUDFLikeAny.java   |  87 +++++++++
 .../queries/clientnegative/udf_likeall_wrong1.q |   2 +
 .../queries/clientnegative/udf_likeany_wrong1.q |   2 +
 .../test/queries/clientpositive/udf_likeall.q   |  57 ++++++
 .../test/queries/clientpositive/udf_likeany.q   |  57 ++++++
 .../clientnegative/udf_likeall_wrong1.q.out     |   1 +
 .../clientnegative/udf_likeany_wrong1.q.out     |   1 +
 .../results/clientpositive/show_functions.q.out |   2 +
 .../results/clientpositive/udf_likeall.q.out    | 187 +++++++++++++++++++
 .../results/clientpositive/udf_likeany.q.out    | 187 +++++++++++++++++++
 18 files changed, 952 insertions(+)
----------------------------------------------------------------------
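For readers skimming the diffs below, here is a minimal HiveQL sketch of the two new operators, adapted from the udf_likeall.q and udf_likeany.q tests added in this commit (the constant-only form shown here, without the FROM src clause the tests use, is an assumption about the Hive version in use):

    SELECT 'abc' LIKE ALL ('a%', '%bc%', '%c');   -- true: every pattern matches
    SELECT 'abc' LIKE ALL ('z%', '%y', '%dx%');   -- false: not all patterns match
    SELECT 'abc' LIKE ANY ('a%', '%d', '%cd%');   -- true: the first pattern matches

LIKE ALL requires the value to match every pattern in the list, LIKE ANY requires it to match at least one, and both return NULL when the left-hand expression or any pattern in the list is NULL.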


http://git-wip-us.apache.org/repos/asf/hive/blob/740779f6/ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionRegistry.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionRegistry.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionRegistry.java
index 8dc5f2e..1b556ac 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionRegistry.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionRegistry.java
@@ -272,6 +272,8 @@ public final class FunctionRegistry {
     system.registerGenericUDF("initcap", GenericUDFInitCap.class);
 
     system.registerUDF("like", UDFLike.class, true);
+    system.registerGenericUDF("likeany", GenericUDFLikeAny.class);
+    system.registerGenericUDF("likeall", GenericUDFLikeAll.class);
     system.registerGenericUDF("rlike", GenericUDFRegExp.class);
     system.registerGenericUDF("regexp", GenericUDFRegExp.class);
     system.registerUDF("regexp_replace", UDFRegExpReplace.class, false);

http://git-wip-us.apache.org/repos/asf/hive/blob/740779f6/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveLexer.g
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveLexer.g b/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveLexer.g
index 0721b92..190b66b 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveLexer.g
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveLexer.g
@@ -49,6 +49,7 @@ KW_AND : 'AND';
 KW_OR : 'OR';
 KW_NOT : 'NOT' | '!';
 KW_LIKE : 'LIKE';
+KW_ANY : 'ANY';
 
 KW_IF : 'IF';
 KW_EXISTS : 'EXISTS';

http://git-wip-us.apache.org/repos/asf/hive/blob/740779f6/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g b/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g
index d98a663..ca639d3 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g
@@ -397,6 +397,8 @@ TOK_OPERATOR;
 TOK_EXPRESSION;
 TOK_DETAIL;
 TOK_BLOCKING;
+TOK_LIKEANY;
+TOK_LIKEALL;
 }
 
 

http://git-wip-us.apache.org/repos/asf/hive/blob/740779f6/ql/src/java/org/apache/hadoop/hive/ql/parse/IdentifiersParser.g
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/IdentifiersParser.g b/ql/src/java/org/apache/hadoop/hive/ql/parse/IdentifiersParser.g
index 8598fae..645ced9 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/IdentifiersParser.g
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/IdentifiersParser.g
@@ -567,6 +567,12 @@ precedenceSimilarExpressionAtom[CommonTree t]
     |
     KW_BETWEEN (min=precedenceBitwiseOrExpression) KW_AND (max=precedenceBitwiseOrExpression)
     -> ^(TOK_FUNCTION Identifier["between"] KW_FALSE {$t} $min $max)
+    |
+    KW_LIKE KW_ANY (expr=expressionsInParenthesis[false])
+    -> ^(TOK_FUNCTION TOK_LIKEANY {$t} {$expr.tree})
+    |
+    KW_LIKE KW_ALL (expr=expressionsInParenthesis[false])
+    -> ^(TOK_FUNCTION TOK_LIKEALL {$t} {$expr.tree})
     ;
 
 precedenceSimilarExpressionIn[CommonTree t]
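Reading this grammar rule together with the TypeCheckProcFactory change below, an expression such as company LIKE ANY ('%oo%', '%in') is parsed into a TOK_LIKEANY node and type-checked as the likeany generic UDF (likewise TOK_LIKEALL and likeall). Assuming that mapping, the operator form and a direct call to the registered function should be interchangeable; the bare SELECTs below are an illustrative sketch, not output from this patch:

    SELECT 'google' LIKE ANY ('%oo%', '%in');     -- true
    SELECT likeany('google', '%oo%', '%in');      -- expected to be equivalent
    SELECT 'google' LIKE ALL ('%oo%', '%go%');    -- true
    SELECT likeall('google', '%oo%', '%go%');     -- expected to be equivalent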

http://git-wip-us.apache.org/repos/asf/hive/blob/740779f6/ql/src/java/org/apache/hadoop/hive/ql/parse/TypeCheckProcFactory.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/TypeCheckProcFactory.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/TypeCheckProcFactory.java
index 8f8eab0..c3227c9 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/TypeCheckProcFactory.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/TypeCheckProcFactory.java
@@ -198,6 +198,7 @@ public class TypeCheckProcFactory {
         + HiveParser.KW_WHEN + "%|" + HiveParser.KW_IN + "%|"
         + HiveParser.KW_ARRAY + "%|" + HiveParser.KW_MAP + "%|"
         + HiveParser.KW_STRUCT + "%|" + HiveParser.KW_EXISTS + "%|"
+        + HiveParser.TOK_LIKEALL + "%|" + HiveParser.TOK_LIKEANY + "%|"
         + HiveParser.TOK_SUBQUERY_OP_NOTIN + "%"),
         tf.getStrExprProcessor());
     opRules.put(new RuleRegExp("R4", HiveParser.KW_TRUE + "%|"
@@ -724,6 +725,8 @@ public class TypeCheckProcFactory {
       specialFunctionTextHashMap = new HashMap<Integer, String>();
       specialFunctionTextHashMap.put(HiveParser.TOK_ISNULL, "isnull");
       specialFunctionTextHashMap.put(HiveParser.TOK_ISNOTNULL, "isnotnull");
+      specialFunctionTextHashMap.put(HiveParser.TOK_LIKEANY, "likeany");
+      specialFunctionTextHashMap.put(HiveParser.TOK_LIKEALL, "likeall");
       conversionFunctionTextHashMap = new HashMap<Integer, String>();
       conversionFunctionTextHashMap.put(HiveParser.TOK_BOOLEAN,
           serdeConstants.BOOLEAN_TYPE_NAME);

http://git-wip-us.apache.org/repos/asf/hive/blob/740779f6/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFLikeAll.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFLikeAll.java b/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFLikeAll.java
new file mode 100644
index 0000000..2a92689
--- /dev/null
+++ b/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFLikeAll.java
@@ -0,0 +1,133 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.ql.udf.generic;
+
+import org.apache.hadoop.hive.ql.exec.Description;
+import org.apache.hadoop.hive.ql.exec.UDFArgumentException;
+import org.apache.hadoop.hive.ql.exec.UDFArgumentLengthException;
+import org.apache.hadoop.hive.ql.metadata.HiveException;
+import org.apache.hadoop.hive.ql.udf.UDFLike;
+import org.apache.hadoop.hive.serde2.objectinspector.ConstantObjectInspector;
+import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
+import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorConverters;
+import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorConverters.Converter;
+import org.apache.hadoop.hive.serde2.objectinspector.PrimitiveObjectInspector;
+import org.apache.hadoop.hive.serde2.objectinspector.PrimitiveObjectInspector.PrimitiveCategory;
+import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory;
+import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorUtils.PrimitiveGrouping;
+import org.apache.hadoop.io.BooleanWritable;
+import org.apache.hadoop.io.Text;
+
+/**
+ * GenericUDFLikeAll returns true if a text (column value) matches all of the given patterns.
+ *
+ * Example usage: SELECT key FROM src WHERE key like all ('%ab%', 'a%','b%','abc');
+ *
+ * LIKE ALL returns true if test matches all patterns patternN.
+ * Returns NULL if the expression on the left hand side is NULL or if one of the patterns in the list is NULL.
+ *
+ */
+
+@Description(
+    name = "like all",
+    value = "test _FUNC_(pattern1, pattern2...) - returns true if test matches all patterns patternN. "
+        + " Returns NULL if the expression on the left hand side is NULL or if one of the patterns in the list is NULL.")
+public class GenericUDFLikeAll extends GenericUDF {
+  private transient PrimitiveCategory[] inputTypes;
+  private transient Converter[] converters;
+  private transient boolean isConstantNullPatternContain;
+  private boolean isAllPatternsConstant = true;
+  private final BooleanWritable bw = new BooleanWritable();
+
+  @Override
+  public ObjectInspector initialize(ObjectInspector[] arguments) throws UDFArgumentException {
+    if (arguments.length < 2) {
+      throw new UDFArgumentLengthException("The like all operator requires at least one pattern for matching, got "
+          + (arguments.length - 1));
+    }
+    inputTypes = new PrimitiveCategory[arguments.length];
+    converters = new Converter[arguments.length];
+
+    /**expects string or null arguments */
+    for (int idx = 0; idx < arguments.length; idx++) {
+      checkArgPrimitive(arguments, idx);
+      checkArgGroups(arguments, idx, inputTypes, PrimitiveGrouping.STRING_GROUP, PrimitiveGrouping.VOID_GROUP);
+      PrimitiveCategory inputType = ((PrimitiveObjectInspector) arguments[idx]).getPrimitiveCategory();
+      if (arguments[idx] instanceof ConstantObjectInspector && idx != 0) {
+        Object constValue = ((ConstantObjectInspector) arguments[idx]).getWritableConstantValue();
+        if (!isConstantNullPatternContain && constValue == null) {
+          isConstantNullPatternContain = true;
+        }
+      } else if (idx != 0 && isAllPatternsConstant) {
+        isAllPatternsConstant = false;
+      }
+      converters[idx] = ObjectInspectorConverters.getConverter(arguments[idx], getOutputOI(inputType));
+      inputTypes[idx] = inputType;
+    }
+    return PrimitiveObjectInspectorFactory.writableBooleanObjectInspector;
+  }
+
+  @Override
+  public Object evaluate(DeferredObject[] arguments) throws HiveException {
+    bw.set(true);
+
+    /** If the field value or any constant string pattern is null, return null. */
+    if (arguments[0].get() == null || isConstantNullPatternContain) {
+      return null;
+    }
+    /** If all patterns are constant strings and none of them is null, do a short-circuit boolean check.
+     * Otherwise evaluate every pattern: if any pattern value is null, return null; else return the final matching result.
+     */
+    Text columnValue = (Text) converters[0].convert(arguments[0].get());
+    Text pattern = new Text();
+    UDFLike likeUdf = new UDFLike();
+    for (int idx = 1; idx < arguments.length; idx++) {
+      if (arguments[idx].get() == null) {
+        return null;
+      }
+      pattern.set((Text) converters[idx].convert(arguments[idx].get()));
+      if (!likeUdf.evaluate(columnValue, pattern).get() && bw.get()) {
+        bw.set(false);
+        if (isAllPatternsConstant) {
+          return bw;
+        }
+      }
+    }
+    return bw;
+  }
+
+  @Override
+  public String getDisplayString(String[] children) {
+    return getStandardDisplayString("likeall", children);
+  }
+
+  private ObjectInspector getOutputOI(PrimitiveCategory inputType) {
+    switch (inputType) {
+      case CHAR:
+      case STRING:
+      case VARCHAR:
+        return PrimitiveObjectInspectorFactory.writableStringObjectInspector;
+      case VOID:
+        return PrimitiveObjectInspectorFactory.writableVoidObjectInspector;
+      default:
+        break;
+    }
+    return null;
+  }
+}
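A short sketch of the NULL behavior this class documents, using the like_all_table created in udf_likeall.q later in this patch (expected results are inferred from the Javadoc and the accompanying .q.out, not re-run here):

    SELECT company FROM like_all_table WHERE company LIKE ALL ('%oo%', '%go%');    -- google
    SELECT company FROM like_all_table WHERE company LIKE ALL ('%oo%', NULL);      -- no rows: the predicate evaluates to NULL
    SELECT company FROM like_all_table WHERE company NOT LIKE ALL ('%oo%', NULL);  -- also no rows, for the same reason

Because a NULL pattern forces the whole predicate to NULL, neither the positive nor the negated form can ever select a row in that case.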

http://git-wip-us.apache.org/repos/asf/hive/blob/740779f6/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFLikeAny.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFLikeAny.java b/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFLikeAny.java
new file mode 100644
index 0000000..65413a0
--- /dev/null
+++ b/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFLikeAny.java
@@ -0,0 +1,134 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.ql.udf.generic;
+
+import org.apache.hadoop.hive.ql.exec.Description;
+import org.apache.hadoop.hive.ql.exec.UDFArgumentException;
+import org.apache.hadoop.hive.ql.exec.UDFArgumentLengthException;
+import org.apache.hadoop.hive.ql.metadata.HiveException;
+import org.apache.hadoop.hive.ql.udf.UDFLike;
+import org.apache.hadoop.hive.serde2.objectinspector.ConstantObjectInspector;
+import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
+import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorConverters;
+import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorConverters.Converter;
+import org.apache.hadoop.hive.serde2.objectinspector.PrimitiveObjectInspector;
+import org.apache.hadoop.hive.serde2.objectinspector.PrimitiveObjectInspector.PrimitiveCategory;
+import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory;
+import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorUtils.PrimitiveGrouping;
+import org.apache.hadoop.io.BooleanWritable;
+import org.apache.hadoop.io.Text;
+
+
+/**
+ * GenericUDFLikeAny returns true if a text (column value) matches any of the given patterns.
+ *
+ * Example usage: SELECT key FROM src WHERE key like any ('%ab%', 'a%','b%','abc');
+ *
+ * LIKE ANY returns true if test matches any pattern patternN.
+ * Returns NULL if the expression on the left hand side is NULL or if one of the patterns in the list is NULL.
+ */
+
+@Description(
+    name = "like any",
+    value = "test _FUNC_(pattern1, pattern2...) - returns true if test matches any patterns patternN."
+        + " Returns NULL if the expression on the left hand side is NULL or if one of the patterns in the list is NULL. ")
+public class GenericUDFLikeAny extends GenericUDF {
+  private transient PrimitiveCategory[] inputTypes;
+  private transient Converter[] converters;
+  private transient boolean isConstantNullPatternContain;
+  private boolean isAllPatternsConstant = true;
+  private final BooleanWritable bw = new BooleanWritable();
+
+  @Override
+  public ObjectInspector initialize(ObjectInspector[] arguments) throws UDFArgumentException {
+    if (arguments.length < 2) {
+      throw new UDFArgumentLengthException("The like any operator requires at least one pattern for matching, got "
+          + (arguments.length - 1));
+    }
+    inputTypes = new PrimitiveCategory[arguments.length];
+    converters = new Converter[arguments.length];
+
+    /**expects string or null arguments */
+    for (int idx = 0; idx < arguments.length; idx++) {
+      checkArgPrimitive(arguments, idx);
+      checkArgGroups(arguments, idx, inputTypes, PrimitiveGrouping.STRING_GROUP, PrimitiveGrouping.VOID_GROUP);
+      PrimitiveCategory inputType = ((PrimitiveObjectInspector) arguments[idx]).getPrimitiveCategory();
+      if (arguments[idx] instanceof ConstantObjectInspector && idx != 0) {
+        Object constValue = ((ConstantObjectInspector) arguments[idx]).getWritableConstantValue();
+        if (!isConstantNullPatternContain && constValue == null) {
+          isConstantNullPatternContain = true;
+        }
+      } else if (idx != 0 && isAllPatternsConstant) {
+        isAllPatternsConstant = false;
+      }
+      converters[idx] = ObjectInspectorConverters.getConverter(arguments[idx], getOutputOI(inputType));
+      inputTypes[idx] = inputType;
+    }
+    return PrimitiveObjectInspectorFactory.writableBooleanObjectInspector;
+  }
+
+  @Override
+  public Object evaluate(DeferredObject[] arguments) throws HiveException {
+    bw.set(false);
+
+    /** If the field value or any constant string pattern is null, return null. */
+    if (arguments[0].get() == null || isConstantNullPatternContain) {
+      return null;
+    }
+
+    /** If all patterns are constant strings and none of them is null, do a short-circuit boolean check.
+     * Otherwise evaluate every pattern: if any pattern value is null, return null; else return the final matching result.
+     */
+    Text columnValue = (Text) converters[0].convert(arguments[0].get());
+    Text pattern = new Text();
+    UDFLike likeUdf = new UDFLike();
+    for (int idx = 1; idx < arguments.length; idx++) {
+      if (arguments[idx].get() == null) {
+        return null;
+      }
+      pattern.set(converters[idx].convert(arguments[idx].get()).toString());
+      if (likeUdf.evaluate(columnValue, pattern).get() && !bw.get()) {
+        bw.set(true);
+        if (isAllPatternsConstant) {
+          return bw;
+        }
+      }
+    }
+    return bw;
+  }
+
+  @Override
+  public String getDisplayString(String[] children) {
+    return getStandardDisplayString("likeany", children);
+  }
+
+  private ObjectInspector getOutputOI(PrimitiveCategory inputType) {
+    switch (inputType) {
+      case CHAR:
+      case STRING:
+      case VARCHAR:
+        return PrimitiveObjectInspectorFactory.writableStringObjectInspector;
+      case VOID:
+        return PrimitiveObjectInspectorFactory.writableVoidObjectInspector;
+      default:
+        break;
+    }
+    return null;
+  }
+}
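The same NULL rule appears to apply to LIKE ANY: a NULL pattern in the list makes the whole predicate NULL, even when another pattern already matches. A sketch against the like_any_table created in udf_likeany.q (results inferred from the Javadoc and the evaluate() loop above, not re-run here):

    SELECT company FROM like_any_table WHERE company LIKE ANY ('%oo%', '%in', 'fa%');  -- google, facebook, linkedin
    SELECT company FROM like_any_table WHERE company LIKE ANY ('%oo%', NULL);          -- no rows: the predicate evaluates to NULL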

http://git-wip-us.apache.org/repos/asf/hive/blob/740779f6/ql/src/test/org/apache/hadoop/hive/ql/udf/generic/TestGenericUDFLikeAll.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/udf/generic/TestGenericUDFLikeAll.java b/ql/src/test/org/apache/hadoop/hive/ql/udf/generic/TestGenericUDFLikeAll.java
new file mode 100644
index 0000000..1078de2
--- /dev/null
+++ b/ql/src/test/org/apache/hadoop/hive/ql/udf/generic/TestGenericUDFLikeAll.java
@@ -0,0 +1,88 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.ql.udf.generic;
+
+import static org.junit.Assert.assertEquals;
+
+import org.apache.hadoop.hive.common.type.HiveVarchar;
+import org.apache.hadoop.hive.ql.exec.UDFArgumentException;
+import org.apache.hadoop.hive.ql.metadata.HiveException;
+import org.apache.hadoop.hive.ql.udf.generic.GenericUDF.DeferredJavaObject;
+import org.apache.hadoop.hive.ql.udf.generic.GenericUDF.DeferredObject;
+import org.apache.hadoop.hive.serde2.io.HiveVarcharWritable;
+import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
+import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory;
+import org.apache.hadoop.io.BooleanWritable;
+import org.apache.hadoop.io.NullWritable;
+import org.apache.hadoop.io.Text;
+import org.junit.Test;
+
+
+public class TestGenericUDFLikeAll {
+  GenericUDFLikeAll udf = null;
+
+  @Test
+  public void testTrue() throws HiveException {
+    udf = new GenericUDFLikeAll();
+
+    ObjectInspector valueOIOne = PrimitiveObjectInspectorFactory.writableStringObjectInspector;
+    ObjectInspector valueOITwo = PrimitiveObjectInspectorFactory.writableStringObjectInspector;
+    ObjectInspector valueOIThree = PrimitiveObjectInspectorFactory.writableHiveVarcharObjectInspector;
+    ObjectInspector[] arguments = { valueOIOne, valueOITwo, valueOIThree };
+
+    udf.initialize(arguments);
+    DeferredJavaObject valueObjOne = new DeferredJavaObject(new Text("abc"));
+    DeferredJavaObject valueObjTwo = new DeferredJavaObject(new Text("%b%"));
+
+    HiveVarchar vc = new HiveVarchar();
+    vc.setValue("a%");
+    GenericUDF.DeferredJavaObject[] args =
+        { valueObjOne, valueObjTwo, new GenericUDF.DeferredJavaObject(new HiveVarcharWritable(vc)) };
+
+    BooleanWritable output = (BooleanWritable) udf.evaluate(args);
+
+    assertEquals(true, output.get());
+
+  }
+
+  @Test(expected = UDFArgumentException.class)
+  public void expectException() throws HiveException {
+    udf = new GenericUDFLikeAll();
+    ObjectInspector valueOIOne = PrimitiveObjectInspectorFactory.writableStringObjectInspector;
+    ObjectInspector[] arguments = { valueOIOne };
+    udf.initialize(arguments);
+  }
+
+  @Test
+  public void testNull() throws HiveException {
+    udf = new GenericUDFLikeAll();
+    ObjectInspector valueOIOne = PrimitiveObjectInspectorFactory.writableStringObjectInspector;
+    ObjectInspector valueOITwo = PrimitiveObjectInspectorFactory.writableStringObjectInspector;
+    ObjectInspector valueOIThree = PrimitiveObjectInspectorFactory.writableVoidObjectInspector;
+    ObjectInspector[] arguments = { valueOIOne, valueOITwo, valueOIThree };
+    udf.initialize(arguments);
+    DeferredObject valueObjOne = new DeferredJavaObject(new Text("linkedin"));
+    DeferredObject valueObjTwo = new DeferredJavaObject(new Text("%oo%"));
+    DeferredObject valueObjThree = new DeferredJavaObject(NullWritable.get());
+    DeferredObject[] args = { valueObjOne, valueObjTwo, valueObjThree };
+    BooleanWritable output = (BooleanWritable) udf.evaluate(args);
+    assertEquals(null, output);
+  }
+
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/740779f6/ql/src/test/org/apache/hadoop/hive/ql/udf/generic/TestGenericUDFLikeAny.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/udf/generic/TestGenericUDFLikeAny.java b/ql/src/test/org/apache/hadoop/hive/ql/udf/generic/TestGenericUDFLikeAny.java
new file mode 100644
index 0000000..0ce7736
--- /dev/null
+++ b/ql/src/test/org/apache/hadoop/hive/ql/udf/generic/TestGenericUDFLikeAny.java
@@ -0,0 +1,87 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.ql.udf.generic;
+
+import static org.junit.Assert.assertEquals;
+
+import java.io.IOException;
+
+import org.apache.hadoop.hive.common.type.HiveVarchar;
+import org.apache.hadoop.hive.ql.exec.UDFArgumentException;
+import org.apache.hadoop.hive.ql.metadata.HiveException;
+import org.apache.hadoop.hive.ql.udf.generic.GenericUDF.DeferredJavaObject;
+import org.apache.hadoop.hive.ql.udf.generic.GenericUDF.DeferredObject;
+import org.apache.hadoop.hive.serde2.io.HiveVarcharWritable;
+import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
+import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory;
+import org.apache.hadoop.io.BooleanWritable;
+import org.apache.hadoop.io.NullWritable;
+import org.apache.hadoop.io.Text;
+import org.junit.Test;
+
+
+public class TestGenericUDFLikeAny {
+
+  GenericUDFLikeAny udf = null;
+
+  @Test
+  public void testTrue() throws HiveException {
+    udf = new GenericUDFLikeAny();
+
+    ObjectInspector valueOIOne = PrimitiveObjectInspectorFactory.writableStringObjectInspector;
+    ObjectInspector valueOITwo = PrimitiveObjectInspectorFactory.writableStringObjectInspector;
+    ObjectInspector valueOIThree = PrimitiveObjectInspectorFactory.writableStringObjectInspector;
+    ObjectInspector[] arguments = { valueOIOne, valueOITwo, valueOIThree };
+    udf.initialize(arguments);
+    DeferredJavaObject valueObjOne = new DeferredJavaObject(new Text("abc"));
+    DeferredJavaObject valueObjTwo = new DeferredJavaObject(new Text("%b%"));
+    HiveVarchar vc = new HiveVarchar();
+    vc.setValue("a%");
+    GenericUDF.DeferredJavaObject[] args =
+        { valueObjOne, valueObjTwo, new GenericUDF.DeferredJavaObject(new HiveVarcharWritable(vc)) };
+    BooleanWritable output = (BooleanWritable) udf.evaluate(args);
+    assertEquals(true, output.get());
+
+  }
+
+  @Test(expected = UDFArgumentException.class)
+  public void testExpectException() throws IOException, HiveException {
+    udf = new GenericUDFLikeAny();
+    ObjectInspector valueOIOne = PrimitiveObjectInspectorFactory.writableStringObjectInspector;
+    ObjectInspector[] arguments = { valueOIOne };
+    udf.initialize(arguments);
+    udf.close();
+  }
+
+  @Test
+  public void testNull() throws HiveException {
+    udf = new GenericUDFLikeAny();
+    ObjectInspector valueOIOne = PrimitiveObjectInspectorFactory.writableStringObjectInspector;
+    ObjectInspector valueOITwo = PrimitiveObjectInspectorFactory.writableVoidObjectInspector;
+    ObjectInspector valueOIThree = PrimitiveObjectInspectorFactory.writableStringObjectInspector;
+    ObjectInspector[] arguments = { valueOIOne, valueOITwo, valueOIThree };
+    udf.initialize(arguments);
+    DeferredObject valueObjOne = new DeferredJavaObject(new Text("abc"));
+    DeferredObject valueObjTwo = new DeferredJavaObject(NullWritable.get());
+    DeferredObject valueObjThree = new DeferredJavaObject(new Text("%b%"));
+    DeferredObject[] args = { valueObjOne, valueObjTwo, valueObjThree };
+    BooleanWritable output = (BooleanWritable) udf.evaluate(args);
+    assertEquals(null, output);
+  }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/740779f6/ql/src/test/queries/clientnegative/udf_likeall_wrong1.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/udf_likeall_wrong1.q b/ql/src/test/queries/clientnegative/udf_likeall_wrong1.q
new file mode 100644
index 0000000..2578ee2
--- /dev/null
+++ b/ql/src/test/queries/clientnegative/udf_likeall_wrong1.q
@@ -0,0 +1,2 @@
+SELECT 120 like all ('a%','%bc%','%c')
+FROM src WHERE src.key = 86;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/740779f6/ql/src/test/queries/clientnegative/udf_likeany_wrong1.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/udf_likeany_wrong1.q b/ql/src/test/queries/clientnegative/udf_likeany_wrong1.q
new file mode 100644
index 0000000..fcc0905
--- /dev/null
+++ b/ql/src/test/queries/clientnegative/udf_likeany_wrong1.q
@@ -0,0 +1,2 @@
+SELECT 120 like any ('a%','%bc%','%c')
+FROM src WHERE src.key = 86;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/740779f6/ql/src/test/queries/clientpositive/udf_likeall.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/udf_likeall.q b/ql/src/test/queries/clientpositive/udf_likeall.q
new file mode 100644
index 0000000..a580ae8
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/udf_likeall.q
@@ -0,0 +1,57 @@
+SELECT 'abc' like all ('a%','%bc%','%c')
+FROM src WHERE src.key = 86;
+
+SELECT 'abc' LIKE ALL ('z%','%y','%dx%')
+FROM src WHERE src.key = 86;
+
+SELECT 'abc' like all ('abc')
+FROM src WHERE src.key = 86;
+
+DESCRIBE FUNCTION likeall;
+DESCRIBE FUNCTION EXTENDED likeall;
+
+DROP TABLE IF EXISTS like_all_table;
+
+CREATE TABLE like_all_table
+STORED AS TEXTFILE
+AS
+SELECT "google" as company,"%gl%" as pat
+UNION ALL
+SELECT "facebook" as company,"%bo%" as pat
+UNION ALL
+SELECT "linkedin" as company,"%in" as pat
+;
+
+select company from like_all_table where company like all ('%oo%','%go%') ;
+
+select company from like_all_table where company like all ('microsoft','%yoo%') ;
+
+select
+    company,
+    CASE
+        WHEN company like all ('%oo%','%go%') THEN 'Y'
+        ELSE 'N'
+    END AS is_available,
+    CASE
+        WHEN company like all ('%oo%','go%') OR company like all ('%in','ms%') THEN 'Y'
+        ELSE 'N'
+    END AS mix
+From like_all_table ;
+
+--Mix test with constant pattern and column value
+select company from like_all_table where company like all ('%oo%',pat) ;
+
+-- not like all test
+
+select company from like_all_table where company not like all ('%oo%','%in','fa%') ;
+select company from like_all_table where company not like all ('microsoft','%yoo%') ;
+
+-- null test
+
+select company from like_all_table where company like all ('%oo%',null) ;
+
+select company from like_all_table where company not like all ('%oo%',null) ;
+
+select company from like_all_table where company not like all (null,null) ;
+
+select company from like_all_table where company not like all (null,null) ;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/740779f6/ql/src/test/queries/clientpositive/udf_likeany.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/udf_likeany.q b/ql/src/test/queries/clientpositive/udf_likeany.q
new file mode 100644
index 0000000..b3e6dc1
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/udf_likeany.q
@@ -0,0 +1,57 @@
+SELECT 'abc' like any ('a%','%d','%cd%')
+FROM src WHERE src.key = 86;
+
+SELECT 'abc' LIKE ANY ('z%','%y','%dx%')
+FROM src WHERE src.key = 86;
+
+SELECT 'abc' like any ('abc')
+FROM src WHERE src.key = 86;
+
+DESCRIBE FUNCTION likeany;
+DESCRIBE FUNCTION EXTENDED likeany;
+
+DROP TABLE IF EXISTS like_any_table;
+
+CREATE TABLE like_any_table
+STORED AS TEXTFILE
+AS
+SELECT "google" as company,"%oo%" as pat
+UNION ALL
+SELECT "facebook" as company,"%oo%" as pat
+UNION ALL
+SELECT "linkedin" as company,"%in" as pat
+;
+
+select company from like_any_table where company like any ('%oo%','%in','fa%') ;
+
+select company from like_any_table where company like any ('microsoft','%yoo%') ;
+
+select
+    company,
+    CASE
+        WHEN company like any ('%oo%','%in','fa%') THEN 'Y'
+        ELSE 'N'
+    END AS is_available,
+    CASE
+        WHEN company like any ('%oo%','fa%') OR company like any ('%in','ms%') THEN 'Y'
+        ELSE 'N'
+    END AS mix
+From like_any_table;
+
+--Mix test with constant pattern and column value
+select company from like_any_table where company like any ('%zz%',pat) ;
+
+-- not like any test
+
+select company from like_any_table where company not like any ('%oo%','%in','fa%') ;
+select company from like_any_table where company not like any ('microsoft','%yoo%') ;
+
+-- null test
+
+select company from like_any_table where company like any ('%oo%',null) ;
+
+select company from like_any_table where company not like any ('%oo%',null) ;
+
+select company from like_any_table where company like any (null,null) ;
+
+select company from like_any_table where company not like any (null,null) ;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/740779f6/ql/src/test/results/clientnegative/udf_likeall_wrong1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientnegative/udf_likeall_wrong1.q.out b/ql/src/test/results/clientnegative/udf_likeall_wrong1.q.out
new file mode 100644
index 0000000..bf48229
--- /dev/null
+++ b/ql/src/test/results/clientnegative/udf_likeall_wrong1.q.out
@@ -0,0 +1 @@
+FAILED: SemanticException [Error 10016]: Line 1:7 Argument type mismatch '120': likeall only takes STRING_GROUP, VOID_GROUP types as 1st argument, got INT

http://git-wip-us.apache.org/repos/asf/hive/blob/740779f6/ql/src/test/results/clientnegative/udf_likeany_wrong1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientnegative/udf_likeany_wrong1.q.out b/ql/src/test/results/clientnegative/udf_likeany_wrong1.q.out
new file mode 100644
index 0000000..3b7f41d
--- /dev/null
+++ b/ql/src/test/results/clientnegative/udf_likeany_wrong1.q.out
@@ -0,0 +1 @@
+FAILED: SemanticException [Error 10016]: Line 1:7 Argument type mismatch '120': likeany only takes STRING_GROUP, VOID_GROUP types as 1st argument, got INT

http://git-wip-us.apache.org/repos/asf/hive/blob/740779f6/ql/src/test/results/clientpositive/show_functions.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/show_functions.q.out b/ql/src/test/results/clientpositive/show_functions.q.out
index ac5ca41..e638638 100644
--- a/ql/src/test/results/clientpositive/show_functions.q.out
+++ b/ql/src/test/results/clientpositive/show_functions.q.out
@@ -134,6 +134,8 @@ least
 length
 levenshtein
 like
+likeall
+likeany
 ln
 locate
 log

http://git-wip-us.apache.org/repos/asf/hive/blob/740779f6/ql/src/test/results/clientpositive/udf_likeall.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/udf_likeall.q.out b/ql/src/test/results/clientpositive/udf_likeall.q.out
new file mode 100644
index 0000000..7a089c9
--- /dev/null
+++ b/ql/src/test/results/clientpositive/udf_likeall.q.out
@@ -0,0 +1,187 @@
+PREHOOK: query: SELECT 'abc' like all ('a%','%bc%','%c')
+FROM src WHERE src.key = 86
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT 'abc' like all ('a%','%bc%','%c')
+FROM src WHERE src.key = 86
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+#### A masked pattern was here ####
+true
+PREHOOK: query: SELECT 'abc' LIKE ALL ('z%','%y','%dx%')
+FROM src WHERE src.key = 86
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT 'abc' LIKE ALL ('z%','%y','%dx%')
+FROM src WHERE src.key = 86
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+#### A masked pattern was here ####
+false
+PREHOOK: query: SELECT 'abc' like all ('abc')
+FROM src WHERE src.key = 86
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT 'abc' like all ('abc')
+FROM src WHERE src.key = 86
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+#### A masked pattern was here ####
+true
+PREHOOK: query: DESCRIBE FUNCTION likeall
+PREHOOK: type: DESCFUNCTION
+POSTHOOK: query: DESCRIBE FUNCTION likeall
+POSTHOOK: type: DESCFUNCTION
+test likeall(pattern1, pattern2...) - returns true if test matches all patterns patternN.  Returns NULL if the expression on the left hand side is NULL or if one of the patterns in the list is NULL.
+PREHOOK: query: DESCRIBE FUNCTION EXTENDED likeall
+PREHOOK: type: DESCFUNCTION
+POSTHOOK: query: DESCRIBE FUNCTION EXTENDED likeall
+POSTHOOK: type: DESCFUNCTION
+test likeall(pattern1, pattern2...) - returns true if test matches all patterns patternN.  Returns NULL if the expression on the left hand side is NULL or if one of the patterns in the list is NULL.
+Function class:org.apache.hadoop.hive.ql.udf.generic.GenericUDFLikeAll
+Function type:BUILTIN
+PREHOOK: query: DROP TABLE IF EXISTS like_all_table
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: DROP TABLE IF EXISTS like_all_table
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: CREATE TABLE like_all_table
+STORED AS TEXTFILE
+AS
+SELECT "google" as company,"%gl%" as pat
+UNION ALL
+SELECT "facebook" as company,"%bo%" as pat
+UNION ALL
+SELECT "linkedin" as company,"%in" as pat
+PREHOOK: type: CREATETABLE_AS_SELECT
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: database:default
+PREHOOK: Output: default@like_all_table
+POSTHOOK: query: CREATE TABLE like_all_table
+STORED AS TEXTFILE
+AS
+SELECT "google" as company,"%gl%" as pat
+UNION ALL
+SELECT "facebook" as company,"%bo%" as pat
+UNION ALL
+SELECT "linkedin" as company,"%in" as pat
+POSTHOOK: type: CREATETABLE_AS_SELECT
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@like_all_table
+POSTHOOK: Lineage: like_all_table.company EXPRESSION []
+POSTHOOK: Lineage: like_all_table.pat EXPRESSION []
+PREHOOK: query: select company from like_all_table where company like all ('%oo%','%go%')
+PREHOOK: type: QUERY
+PREHOOK: Input: default@like_all_table
+#### A masked pattern was here ####
+POSTHOOK: query: select company from like_all_table where company like all ('%oo%','%go%')
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@like_all_table
+#### A masked pattern was here ####
+google
+PREHOOK: query: select company from like_all_table where company like all ('microsoft','%yoo%')
+PREHOOK: type: QUERY
+PREHOOK: Input: default@like_all_table
+#### A masked pattern was here ####
+POSTHOOK: query: select company from like_all_table where company like all ('microsoft','%yoo%')
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@like_all_table
+#### A masked pattern was here ####
+PREHOOK: query: select
+    company,
+    CASE
+        WHEN company like all ('%oo%','%go%') THEN 'Y'
+        ELSE 'N'
+    END AS is_available,
+    CASE
+        WHEN company like all ('%oo%','go%') OR company like all ('%in','ms%') THEN 'Y'
+        ELSE 'N'
+    END AS mix
+From like_all_table
+PREHOOK: type: QUERY
+PREHOOK: Input: default@like_all_table
+#### A masked pattern was here ####
+POSTHOOK: query: select
+    company,
+    CASE
+        WHEN company like all ('%oo%','%go%') THEN 'Y'
+        ELSE 'N'
+    END AS is_available,
+    CASE
+        WHEN company like all ('%oo%','go%') OR company like all ('%in','ms%') THEN 'Y'
+        ELSE 'N'
+    END AS mix
+From like_all_table
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@like_all_table
+#### A masked pattern was here ####
+google	Y	Y
+facebook	N	N
+linkedin	N	N
+PREHOOK: query: select company from like_all_table where company like all ('%oo%',pat)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@like_all_table
+#### A masked pattern was here ####
+POSTHOOK: query: select company from like_all_table where company like all ('%oo%',pat)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@like_all_table
+#### A masked pattern was here ####
+google
+facebook
+PREHOOK: query: select company from like_all_table where company not like all ('%oo%','%in','fa%')
+PREHOOK: type: QUERY
+PREHOOK: Input: default@like_all_table
+#### A masked pattern was here ####
+POSTHOOK: query: select company from like_all_table where company not like all ('%oo%','%in','fa%')
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@like_all_table
+#### A masked pattern was here ####
+google
+facebook
+linkedin
+PREHOOK: query: select company from like_all_table where company not like all ('microsoft','%yoo%')
+PREHOOK: type: QUERY
+PREHOOK: Input: default@like_all_table
+#### A masked pattern was here ####
+POSTHOOK: query: select company from like_all_table where company not like all ('microsoft','%yoo%')
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@like_all_table
+#### A masked pattern was here ####
+google
+facebook
+linkedin
+PREHOOK: query: select company from like_all_table where company like all ('%oo%',null)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@like_all_table
+#### A masked pattern was here ####
+POSTHOOK: query: select company from like_all_table where company like all ('%oo%',null)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@like_all_table
+#### A masked pattern was here ####
+PREHOOK: query: select company from like_all_table where company not like all ('%oo%',null)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@like_all_table
+#### A masked pattern was here ####
+POSTHOOK: query: select company from like_all_table where company not like all ('%oo%',null)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@like_all_table
+#### A masked pattern was here ####
+PREHOOK: query: select company from like_all_table where company not like all (null,null)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@like_all_table
+#### A masked pattern was here ####
+POSTHOOK: query: select company from like_all_table where company not like all (null,null)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@like_all_table
+#### A masked pattern was here ####
+PREHOOK: query: select company from like_all_table where company not like all (null,null)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@like_all_table
+#### A masked pattern was here ####
+POSTHOOK: query: select company from like_all_table where company not like all (null,null)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@like_all_table
+#### A masked pattern was here ####
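
The udf_likeall.q.out results above boil down to two rules: a row survives the filter only when the value matches every pattern in the list, and a NULL value or NULL pattern makes the whole predicate NULL (which is why the queries with a NULL pattern, including the "not like all" ones, return no rows). The following Java sketch reproduces just those rules as observed in the output; the helper names likeToRegex and likeAll are made up for illustration and are not the GenericUDFLikeAll implementation.

    import java.util.regex.Pattern;

    public class LikeAllSketch {
      // Translate a SQL LIKE pattern into an anchored regex: % -> .*, _ -> .
      static String likeToRegex(String likePattern) {
        StringBuilder regex = new StringBuilder();
        for (char c : likePattern.toCharArray()) {
          if (c == '%') {
            regex.append(".*");
          } else if (c == '_') {
            regex.append('.');
          } else {
            regex.append(Pattern.quote(String.valueOf(c)));
          }
        }
        return regex.toString();
      }

      // TRUE only if the value matches every pattern; NULL if the value or
      // any pattern is NULL, matching the rows returned (and filtered) above.
      static Boolean likeAll(String value, String... patterns) {
        if (value == null) {
          return null;
        }
        boolean all = true;
        for (String p : patterns) {
          if (p == null) {
            return null;               // like all ('%oo%', null) -> NULL, row filtered
          }
          all = all && value.matches(likeToRegex(p));
        }
        return all;
      }

      public static void main(String[] args) {
        System.out.println(likeAll("google", "%oo%", "%go%"));    // true  -> only row returned
        System.out.println(likeAll("facebook", "%oo%", "%go%"));  // false -> filtered
        System.out.println(likeAll("google", "%oo%", null));      // null  -> filtered
      }
    }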

http://git-wip-us.apache.org/repos/asf/hive/blob/740779f6/ql/src/test/results/clientpositive/udf_likeany.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/udf_likeany.q.out b/ql/src/test/results/clientpositive/udf_likeany.q.out
new file mode 100644
index 0000000..fc36686
--- /dev/null
+++ b/ql/src/test/results/clientpositive/udf_likeany.q.out
@@ -0,0 +1,187 @@
+PREHOOK: query: SELECT 'abc' like any ('a%','%d','%cd%')
+FROM src WHERE src.key = 86
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT 'abc' like any ('a%','%d','%cd%')
+FROM src WHERE src.key = 86
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+#### A masked pattern was here ####
+true
+PREHOOK: query: SELECT 'abc' LIKE ANY ('z%','%y','%dx%')
+FROM src WHERE src.key = 86
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT 'abc' LIKE ANY ('z%','%y','%dx%')
+FROM src WHERE src.key = 86
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+#### A masked pattern was here ####
+false
+PREHOOK: query: SELECT 'abc' like any ('abc')
+FROM src WHERE src.key = 86
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT 'abc' like any ('abc')
+FROM src WHERE src.key = 86
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+#### A masked pattern was here ####
+true
+PREHOOK: query: DESCRIBE FUNCTION likeany
+PREHOOK: type: DESCFUNCTION
+POSTHOOK: query: DESCRIBE FUNCTION likeany
+POSTHOOK: type: DESCFUNCTION
+test likeany(pattern1, pattern2...) - returns true if test matches any patterns patternN. Returns NULL if the expression on the left hand side is NULL or if one of the patterns in the list is NULL. 
+PREHOOK: query: DESCRIBE FUNCTION EXTENDED likeany
+PREHOOK: type: DESCFUNCTION
+POSTHOOK: query: DESCRIBE FUNCTION EXTENDED likeany
+POSTHOOK: type: DESCFUNCTION
+test likeany(pattern1, pattern2...) - returns true if test matches any patterns patternN. Returns NULL if the expression on the left hand side is NULL or if one of the patterns in the list is NULL. 
+Function class:org.apache.hadoop.hive.ql.udf.generic.GenericUDFLikeAny
+Function type:BUILTIN
+PREHOOK: query: DROP TABLE IF EXISTS like_any_table
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: DROP TABLE IF EXISTS like_any_table
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: CREATE TABLE like_any_table
+STORED AS TEXTFILE
+AS
+SELECT "google" as company,"%oo%" as pat
+UNION ALL
+SELECT "facebook" as company,"%oo%" as pat
+UNION ALL
+SELECT "linkedin" as company,"%in" as pat
+PREHOOK: type: CREATETABLE_AS_SELECT
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: database:default
+PREHOOK: Output: default@like_any_table
+POSTHOOK: query: CREATE TABLE like_any_table
+STORED AS TEXTFILE
+AS
+SELECT "google" as company,"%oo%" as pat
+UNION ALL
+SELECT "facebook" as company,"%oo%" as pat
+UNION ALL
+SELECT "linkedin" as company,"%in" as pat
+POSTHOOK: type: CREATETABLE_AS_SELECT
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@like_any_table
+POSTHOOK: Lineage: like_any_table.company EXPRESSION []
+POSTHOOK: Lineage: like_any_table.pat EXPRESSION []
+PREHOOK: query: select company from like_any_table where company like any ('%oo%','%in','fa%')
+PREHOOK: type: QUERY
+PREHOOK: Input: default@like_any_table
+#### A masked pattern was here ####
+POSTHOOK: query: select company from like_any_table where company like any ('%oo%','%in','fa%')
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@like_any_table
+#### A masked pattern was here ####
+google
+facebook
+linkedin
+PREHOOK: query: select company from like_any_table where company like any ('microsoft','%yoo%')
+PREHOOK: type: QUERY
+PREHOOK: Input: default@like_any_table
+#### A masked pattern was here ####
+POSTHOOK: query: select company from like_any_table where company like any ('microsoft','%yoo%')
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@like_any_table
+#### A masked pattern was here ####
+PREHOOK: query: select
+    company,
+    CASE
+        WHEN company like any ('%oo%','%in','fa%') THEN 'Y'
+        ELSE 'N'
+    END AS is_available,
+    CASE
+        WHEN company like any ('%oo%','fa%') OR company like any ('%in','ms%') THEN 'Y'
+        ELSE 'N'
+    END AS mix
+From like_any_table
+PREHOOK: type: QUERY
+PREHOOK: Input: default@like_any_table
+#### A masked pattern was here ####
+POSTHOOK: query: select
+    company,
+    CASE
+        WHEN company like any ('%oo%','%in','fa%') THEN 'Y'
+        ELSE 'N'
+    END AS is_available,
+    CASE
+        WHEN company like any ('%oo%','fa%') OR company like any ('%in','ms%') THEN 'Y'
+        ELSE 'N'
+    END AS mix
+From like_any_table
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@like_any_table
+#### A masked pattern was here ####
+google	Y	Y
+facebook	Y	Y
+linkedin	Y	Y
+PREHOOK: query: select company from like_any_table where company like any ('%zz%',pat)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@like_any_table
+#### A masked pattern was here ####
+POSTHOOK: query: select company from like_any_table where company like any ('%zz%',pat)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@like_any_table
+#### A masked pattern was here ####
+google
+facebook
+linkedin
+PREHOOK: query: select company from like_any_table where company not like any ('%oo%','%in','fa%')
+PREHOOK: type: QUERY
+PREHOOK: Input: default@like_any_table
+#### A masked pattern was here ####
+POSTHOOK: query: select company from like_any_table where company not like any ('%oo%','%in','fa%')
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@like_any_table
+#### A masked pattern was here ####
+PREHOOK: query: select company from like_any_table where company not like any ('microsoft','%yoo%')
+PREHOOK: type: QUERY
+PREHOOK: Input: default@like_any_table
+#### A masked pattern was here ####
+POSTHOOK: query: select company from like_any_table where company not like any ('microsoft','%yoo%')
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@like_any_table
+#### A masked pattern was here ####
+google
+facebook
+linkedin
+PREHOOK: query: select company from like_any_table where company like any ('%oo%',null)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@like_any_table
+#### A masked pattern was here ####
+POSTHOOK: query: select company from like_any_table where company like any ('%oo%',null)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@like_any_table
+#### A masked pattern was here ####
+PREHOOK: query: select company from like_any_table where company not like any ('%oo%',null)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@like_any_table
+#### A masked pattern was here ####
+POSTHOOK: query: select company from like_any_table where company not like any ('%oo%',null)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@like_any_table
+#### A masked pattern was here ####
+PREHOOK: query: select company from like_any_table where company like any (null,null)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@like_any_table
+#### A masked pattern was here ####
+POSTHOOK: query: select company from like_any_table where company like any (null,null)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@like_any_table
+#### A masked pattern was here ####
+PREHOOK: query: select company from like_any_table where company not like any (null,null)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@like_any_table
+#### A masked pattern was here ####
+POSTHOOK: query: select company from like_any_table where company not like any (null,null)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@like_any_table
+#### A masked pattern was here ####
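
The like-any results are the mirror image: one matching pattern is enough to keep the row, but (as the ('%oo%',null) and (null,null) cases show) a NULL pattern still turns the whole predicate into NULL. A short hypothetical companion to the sketch above, reusing its likeToRegex helper; this is not the GenericUDFLikeAny code.

    // Add to LikeAllSketch above; reuses likeToRegex.
    // TRUE if the value matches at least one pattern; NULL if the value or
    // any pattern is NULL -- so 'google' like any ('%oo%', null) filters the row.
    static Boolean likeAny(String value, String... patterns) {
      if (value == null) {
        return null;
      }
      boolean any = false;
      for (String p : patterns) {
        if (p == null) {
          return null;
        }
        any = any || value.matches(likeToRegex(p));
      }
      return any;
    }
    // likeAny("google", "%oo%", "%in", "fa%")  -> true  (row kept)
    // likeAny("google", "microsoft", "%yoo%")  -> false (row filtered)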


[06/50] [abbrv] hive git commit: HIVE-15795 : Support Accumulo Index Tables in Hive Accumulo Connector (Mike Fagan, reviewed by Josh Elser) ADDENDUM

Posted by we...@apache.org.
 HIVE-15795 : Support Accumulo Index Tables in Hive Accumulo Connector (Mike Fagan, reviewed by Josh Elser) ADDENDUM


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/ed6501ed
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/ed6501ed
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/ed6501ed

Branch: refs/heads/hive-14535
Commit: ed6501ed39040f308c792f22756d3169f37cb3a4
Parents: 5d45966
Author: sergey <se...@apache.org>
Authored: Wed May 3 13:42:02 2017 -0700
Committer: sergey <se...@apache.org>
Committed: Wed May 3 13:42:02 2017 -0700

----------------------------------------------------------------------
 .../hive/accumulo/AccumuloIndexLexicoder.java       |  8 ++++----
 .../accumulo/predicate/AccumuloRangeGenerator.java  |  2 +-
 .../hive/accumulo/TestAccumuloIndexLexicoder.java   | 16 ++++++++--------
 3 files changed, 13 insertions(+), 13 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/ed6501ed/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/AccumuloIndexLexicoder.java
----------------------------------------------------------------------
diff --git a/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/AccumuloIndexLexicoder.java b/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/AccumuloIndexLexicoder.java
index 6703570..4ad35f8 100644
--- a/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/AccumuloIndexLexicoder.java
+++ b/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/AccumuloIndexLexicoder.java
@@ -73,9 +73,9 @@ public final class AccumuloIndexLexicoder {
       case serdeConstants.DOUBLE_TYPE_NAME :
         return DOUBLE_LEXICODER.encode(Double.valueOf(new String(value)));
       case serdeConstants.BIGINT_TYPE_NAME :
-        return LONG_LEXICODER.encode(Long.valueOf(new String(value)));
-      case serdeConstants.DECIMAL_TYPE_NAME :
         return BIG_INTEGER_LEXICODER.encode(new BigInteger(new String(value), 10));
+      case serdeConstants.DECIMAL_TYPE_NAME :
+        return new String(value).getBytes(UTF_8);
       default :
         // return the passed in string value
         return value;
@@ -99,9 +99,9 @@ public final class AccumuloIndexLexicoder {
       case serdeConstants.DOUBLE_TYPE_NAME :
         return DOUBLE_LEXICODER.encode(ByteBuffer.wrap(value).asDoubleBuffer().get());
       case serdeConstants.BIGINT_TYPE_NAME :
-        return LONG_LEXICODER.encode(ByteBuffer.wrap(value).asLongBuffer().get());
-      case serdeConstants.DECIMAL_TYPE_NAME :
         return BIG_INTEGER_LEXICODER.encode(new BigInteger(value));
+      case serdeConstants.DECIMAL_TYPE_NAME :
+        return new String(value).getBytes(UTF_8);
       default :
         return value;
     }
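
The hunk above moves BIGINT index values onto the BigIntegerLexicoder and leaves DECIMAL values as their UTF-8 string form. The point of a lexicoder is that the encoded bytes sort the same way the numbers do, which is what makes range scans over the Accumulo index table meaningful. A small illustrative sketch against the Accumulo client lexicoder API (the values and the byte comparison are made up for illustration):

    import java.math.BigInteger;
    import org.apache.accumulo.core.client.lexicoder.BigIntegerLexicoder;
    import org.apache.hadoop.io.WritableComparator;

    public class LexicoderOrderSketch {
      public static void main(String[] args) {
        BigIntegerLexicoder lexicoder = new BigIntegerLexicoder();
        byte[] small = lexicoder.encode(new BigInteger("99"));
        byte[] large = lexicoder.encode(new BigInteger("1232322323"));

        // Lexicographic byte order matches numeric order, so Ranges built from
        // encoded bounds select the right index rows.
        int cmp = WritableComparator.compareBytes(small, 0, small.length,
                                                  large, 0, large.length);
        System.out.println(cmp < 0);  // true: 99 sorts before 1232322323

        // Raw ASCII digits would compare as text ("99" > "100"), which is why
        // numeric index values go through a lexicoder at all.
      }
    }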

http://git-wip-us.apache.org/repos/asf/hive/blob/ed6501ed/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/predicate/AccumuloRangeGenerator.java
----------------------------------------------------------------------
diff --git a/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/predicate/AccumuloRangeGenerator.java b/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/predicate/AccumuloRangeGenerator.java
index afdc647..90607ed 100644
--- a/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/predicate/AccumuloRangeGenerator.java
+++ b/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/predicate/AccumuloRangeGenerator.java
@@ -347,7 +347,7 @@ public class AccumuloRangeGenerator implements NodeProcessor {
       throws SemanticException {
     Text constText = getConstantText(objInspector);
     byte[] value = constText.toString().getBytes(UTF_8);
-    byte[] encoded = AccumuloIndexLexicoder.encodeValue(value, objInspector.getTypeName(), true);
+    byte[] encoded = AccumuloIndexLexicoder.encodeValue(value, leftHandNode.getTypeString(), true);
     Range range = getRange(genericUdf, leftHandNode, new Text(encoded));
     if (indexScanner != null) {
       return indexScanner.getIndexRowRanges(columnName, range);

http://git-wip-us.apache.org/repos/asf/hive/blob/ed6501ed/accumulo-handler/src/test/org/apache/hadoop/hive/accumulo/TestAccumuloIndexLexicoder.java
----------------------------------------------------------------------
diff --git a/accumulo-handler/src/test/org/apache/hadoop/hive/accumulo/TestAccumuloIndexLexicoder.java b/accumulo-handler/src/test/org/apache/hadoop/hive/accumulo/TestAccumuloIndexLexicoder.java
index 70362ff..b19f10e 100644
--- a/accumulo-handler/src/test/org/apache/hadoop/hive/accumulo/TestAccumuloIndexLexicoder.java
+++ b/accumulo-handler/src/test/org/apache/hadoop/hive/accumulo/TestAccumuloIndexLexicoder.java
@@ -112,15 +112,15 @@ public class TestAccumuloIndexLexicoder {
 
   @Test
   public void testBigIntBinary() {
-    byte[] value = ByteBuffer.allocate(8).putLong(1232322323).array();
-    byte[] encoded = new LongLexicoder().encode(1232322323L);
+    byte[] value = new String("1232322323").getBytes(UTF_8);
+    byte[] encoded = new BigIntegerLexicoder().encode(new BigInteger("1232322323", 10));
 
-    byte[] lex = AccumuloIndexLexicoder.encodeValue(value, serdeConstants.BIGINT_TYPE_NAME, false);
+    byte[] lex = AccumuloIndexLexicoder.encodeValue(value, serdeConstants.BIGINT_TYPE_NAME, true);
     assertArrayEquals(lex, encoded);
 
     value = new BigInteger( "1232322323", 10 ).toByteArray();
     encoded = new BigIntegerLexicoder().encode(new BigInteger("1232322323", 10 ));
-    lex = AccumuloIndexLexicoder.encodeValue(value, serdeConstants.DECIMAL_TYPE_NAME, false);
+    lex = AccumuloIndexLexicoder.encodeValue(value, serdeConstants.BIGINT_TYPE_NAME, false);
     assertArrayEquals(lex, encoded);
   }
 
@@ -128,7 +128,7 @@ public class TestAccumuloIndexLexicoder {
   public void testDecimalString() {
     String strVal = "12323232233434";
     byte[] value = strVal.getBytes(UTF_8);
-    byte[] encoded = new BigIntegerLexicoder().encode(new BigInteger(strVal, 10));
+    byte[] encoded = strVal.getBytes(UTF_8);
 
     byte[] lex = AccumuloIndexLexicoder.encodeValue(value, serdeConstants.DECIMAL_TYPE_NAME, true);
     assertArrayEquals(lex, encoded);
@@ -140,10 +140,10 @@ public class TestAccumuloIndexLexicoder {
 
   @Test
   public void testDecimalBinary() {
-    BigInteger value = new BigInteger("12323232233434", 10);
-    byte[] encoded = new BigIntegerLexicoder().encode(value);
+    byte[] value = new BigInteger("12323232233434", 10).toString().getBytes(UTF_8);
+    byte[] encoded = new String(value).getBytes(UTF_8);
 
-    byte[] lex = AccumuloIndexLexicoder.encodeValue(value.toByteArray(), serdeConstants.DECIMAL_TYPE_NAME, false);
+    byte[] lex = AccumuloIndexLexicoder.encodeValue(value, serdeConstants.DECIMAL_TYPE_NAME, false);
     assertArrayEquals(lex, encoded);
   }
 


[50/50] [abbrv] hive git commit: HIVE-14671 : merge master into hive-14535 (Wei Zheng)

Posted by we...@apache.org.
HIVE-14671 : merge master into hive-14535 (Wei Zheng)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/1ceaf357
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/1ceaf357
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/1ceaf357

Branch: refs/heads/hive-14535
Commit: 1ceaf357bbc27a42e10344e01f7da69a0df4b913
Parents: ed64a74 e4856ca
Author: Wei Zheng <we...@apache.org>
Authored: Mon May 8 15:16:29 2017 -0700
Committer: Wei Zheng <we...@apache.org>
Committed: Mon May 8 15:16:29 2017 -0700

----------------------------------------------------------------------
 .gitignore                                      |     2 +
 .travis.yml                                     |     4 +-
 RELEASE_NOTES.txt                               |   649 +-
 accumulo-handler/pom.xml                        |    22 +-
 .../accumulo/AccumuloDefaultIndexScanner.java   |   222 +
 .../hive/accumulo/AccumuloIndexLexicoder.java   |   109 +
 .../hive/accumulo/AccumuloIndexScanner.java     |    56 +
 .../accumulo/AccumuloIndexScannerException.java |    39 +
 .../hive/accumulo/AccumuloStorageHandler.java   |   155 +-
 .../accumulo/mr/AccumuloIndexDefinition.java    |    79 +
 .../mr/AccumuloIndexedOutputFormat.java         |   334 +
 .../mr/HiveAccumuloTableOutputFormat.java       |    62 +-
 .../accumulo/mr/IndexOutputConfigurator.java    |    75 +
 .../hadoop/hive/accumulo/mr/package-info.java   |     4 +
 .../predicate/AccumuloPredicateHandler.java     |    87 +-
 .../predicate/AccumuloRangeGenerator.java       |   123 +-
 .../predicate/PrimitiveComparisonFilter.java    |    13 +-
 .../accumulo/serde/AccumuloIndexParameters.java |   100 +
 .../accumulo/serde/AccumuloSerDeParameters.java |    19 +
 .../hive/accumulo/serde/package-info.java       |     4 +
 .../TestAccumuloDefaultIndexScanner.java        |   218 +
 .../accumulo/TestAccumuloIndexLexicoder.java    |   177 +
 .../accumulo/TestAccumuloIndexParameters.java   |   112 +
 .../accumulo/TestAccumuloStorageHandler.java    |     3 +
 .../predicate/TestAccumuloPredicateHandler.java |    11 +-
 .../predicate/TestAccumuloRangeGenerator.java   |   201 +-
 .../src/test/queries/positive/accumulo_index.q  |    44 +
 .../test/results/positive/accumulo_index.q.out  |   180 +
 beeline/pom.xml                                 |     2 +-
 .../java/org/apache/hive/beeline/BeeLine.java   |    72 +
 .../org/apache/hive/beeline/BeeLineOpts.java    |    15 +-
 .../java/org/apache/hive/beeline/Commands.java  |   115 +-
 .../apache/hive/beeline/HiveSchemaHelper.java   |    12 +-
 .../org/apache/hive/beeline/HiveSchemaTool.java |    71 +-
 .../org/apache/hive/beeline/OutputFile.java     |    74 +-
 .../UserHS2ConnectionFileParser.java            |     2 +-
 .../logs/BeelineInPlaceUpdateStream.java        |    17 +
 beeline/src/main/resources/BeeLine.properties   |     1 +
 .../hive/beeline/TestBeelineArgParsing.java     |    12 +
 .../apache/hive/beeline/TestHiveSchemaTool.java |    17 +
 cli/pom.xml                                     |     2 +-
 .../org/apache/hadoop/hive/cli/RCFileCat.java   |    13 +-
 .../apache/hadoop/hive/cli/TestRCFileCat.java   |     4 +-
 common/pom.xml                                  |    46 +-
 .../hive/common/CopyOnFirstWriteProperties.java |   344 +
 .../apache/hadoop/hive/common/FileUtils.java    |   162 +-
 .../apache/hadoop/hive/common/JvmMetrics.java   |   187 +
 .../hadoop/hive/common/JvmMetricsInfo.java      |    65 +
 .../org/apache/hadoop/hive/common/LogUtils.java |    35 +-
 .../hadoop/hive/common/MemoryEstimate.java      |    29 +
 .../hadoop/hive/common/StatsSetupConst.java     |     2 +-
 .../hadoop/hive/common/StringInternUtils.java   |    16 +-
 .../hive/common/ValidCompactorTxnList.java      |    11 +-
 .../hadoop/hive/common/ValidReadTxnList.java    |   115 +-
 .../apache/hadoop/hive/common/ValidTxnList.java |    18 +-
 .../hive/common/jsonexplain/Connection.java     |    35 +
 .../hive/common/jsonexplain/DagJsonParser.java  |   167 +
 .../common/jsonexplain/DagJsonParserUtils.java  |    53 +
 .../common/jsonexplain/JsonParserFactory.java   |     4 +
 .../hadoop/hive/common/jsonexplain/Op.java      |   358 +
 .../hadoop/hive/common/jsonexplain/Printer.java |    41 +
 .../hadoop/hive/common/jsonexplain/Stage.java   |   262 +
 .../hadoop/hive/common/jsonexplain/Vertex.java  |   323 +
 .../jsonexplain/spark/SparkJsonParser.java      |    35 +
 .../hive/common/jsonexplain/tez/Connection.java |    30 -
 .../hadoop/hive/common/jsonexplain/tez/Op.java  |   356 -
 .../hive/common/jsonexplain/tez/Printer.java    |    41 -
 .../hive/common/jsonexplain/tez/Stage.java      |   262 -
 .../common/jsonexplain/tez/TezJsonParser.java   |   153 +-
 .../jsonexplain/tez/TezJsonParserUtils.java     |    53 -
 .../hive/common/jsonexplain/tez/Vertex.java     |   331 -
 .../hadoop/hive/common/log/InPlaceUpdate.java   |    17 +
 .../hadoop/hive/common/log/ProgressMonitor.java |    17 +
 .../metrics/metrics2/CodahaleMetrics.java       |   192 +-
 .../metrics/metrics2/CodahaleReporter.java      |    29 +
 .../metrics2/ConsoleMetricsReporter.java        |    55 +
 .../metrics/metrics2/JmxMetricsReporter.java    |    56 +
 .../metrics2/JsonFileMetricsReporter.java       |   136 +
 .../metrics/metrics2/Metrics2Reporter.java      |    62 +
 .../org/apache/hadoop/hive/conf/HiveConf.java   |   131 +-
 .../apache/hadoop/hive/conf/HiveConfUtil.java   |    13 +-
 .../apache/hadoop/hive/ql/log/PerfLogger.java   |     1 +
 .../java/org/apache/hive/http/ConfServlet.java  |    10 +-
 .../java/org/apache/hive/http/HttpServer.java   |    69 +-
 .../hadoop/hive/common/TestFileUtils.java       |     1 -
 .../hive/common/TestValidReadTxnList.java       |    29 +-
 .../metrics/metrics2/TestCodahaleMetrics.java   |     7 +-
 .../metrics2/TestCodahaleReportersConf.java     |   145 +
 contrib/pom.xml                                 |     2 +-
 .../clientnegative/case_with_row_sequence.q.out |    16 +-
 data/files/e011_01.txt                          |     4 +
 data/files/events.txt                           |   200 +
 .../metastore_export/csv/TABLE_PARAMS.txt       |   143 -
 .../metastore_export/csv/TABLE_PARAMS.txt.bz2   |   Bin 0 -> 481 bytes
 .../metastore_export/csv/TAB_COL_STATS.txt      |   425 -
 .../metastore_export/csv/TAB_COL_STATS.txt.bz2  |   Bin 0 -> 6216 bytes
 data/files/vector_ptf_part_simple.txt           |    40 +
 docs/changes/ChangesFancyStyle.css              |   170 -
 docs/changes/ChangesSimpleStyle.css             |    49 -
 docs/changes/changes2html.pl                    |   282 -
 docs/site.css                                   |   305 -
 docs/stylesheets/project.xml                    |    41 -
 docs/stylesheets/site.vsl                       |   317 -
 docs/velocity.properties                        |    17 -
 docs/xdocs/index.xml                            |    38 -
 docs/xdocs/language_manual/cli.xml              |   208 -
 .../data-manipulation-statements.xml            |   234 -
 docs/xdocs/language_manual/joins.xml            |   212 -
 docs/xdocs/language_manual/var_substitution.xml |   130 -
 .../working_with_bucketed_tables.xml            |    87 -
 docs/xdocs/udf/reflect.xml                      |    51 -
 druid-handler/pom.xml                           |    18 +-
 .../hadoop/hive/druid/DruidStorageHandler.java  |    70 +-
 .../hive/druid/DruidStorageHandlerUtils.java    |    72 +-
 .../hadoop/hive/druid/io/DruidOutputFormat.java |    13 +-
 .../druid/io/DruidQueryBasedInputFormat.java    |    72 +-
 .../serde/DruidGroupByQueryRecordReader.java    |     8 +-
 .../druid/serde/DruidQueryRecordReader.java     |    20 +-
 .../hadoop/hive/druid/serde/DruidSerDe.java     |    18 +-
 .../hive/druid/TestDruidStorageHandler.java     |    91 +-
 .../hive/ql/io/TestDruidRecordWriter.java       |     8 +-
 errata.txt                                      |     4 +
 hbase-handler/pom.xml                           |     2 +-
 .../src/test/queries/negative/hbase_ddl.q       |     9 +
 .../src/test/queries/positive/hbase_ddl.q       |    20 +
 .../src/test/queries/positive/hbase_queries.q   |     1 +
 .../src/test/results/negative/hbase_ddl.q.out   |    29 +
 .../src/test/results/positive/hbase_ddl.q.out   |   186 +
 hcatalog/build.properties                       |     2 +-
 hcatalog/core/pom.xml                           |    12 +-
 .../apache/hive/hcatalog/cli/TestPermsGrp.java  |     6 +-
 hcatalog/hcatalog-pig-adapter/pom.xml           |     2 +-
 hcatalog/pom.xml                                |    24 +-
 hcatalog/server-extensions/pom.xml              |     2 +-
 .../listener/DbNotificationListener.java        |    85 +-
 .../MetaStoreEventListenerConstants.java        |    33 +
 hcatalog/streaming/pom.xml                      |     2 +-
 .../hcatalog/streaming/StrictRegexWriter.java   |   188 +
 .../hive/hcatalog/streaming/TestStreaming.java  |   115 +-
 hcatalog/webhcat/java-client/pom.xml            |     2 +-
 .../hive/hcatalog/api/TestHCatClient.java       |     2 +-
 hcatalog/webhcat/svr/pom.xml                    |    76 +-
 .../hive/hcatalog/templeton/AppConfig.java      |    37 +
 .../hcatalog/templeton/DeleteDelegator.java     |     6 +-
 .../hive/hcatalog/templeton/HiveDelegator.java  |     2 +-
 .../hive/hcatalog/templeton/JarDelegator.java   |     2 +-
 .../hive/hcatalog/templeton/JobCallable.java    |   115 +
 .../hcatalog/templeton/JobRequestExecutor.java  |   341 +
 .../hcatalog/templeton/LauncherDelegator.java   |   231 +-
 .../hive/hcatalog/templeton/ListDelegator.java  |   148 +-
 .../apache/hive/hcatalog/templeton/Main.java    |    37 +-
 .../hive/hcatalog/templeton/PigDelegator.java   |     2 +-
 .../hcatalog/templeton/SecureProxySupport.java  |     3 +
 .../apache/hive/hcatalog/templeton/Server.java  |    82 +-
 .../hive/hcatalog/templeton/SqoopDelegator.java |     2 +-
 .../hcatalog/templeton/StatusDelegator.java     |    69 +-
 .../hcatalog/templeton/StreamingDelegator.java  |     2 +-
 .../templeton/TooManyRequestsException.java     |    35 +
 .../templeton/tool/TempletonControllerJob.java  |    11 +-
 .../hcatalog/templeton/tool/TempletonUtils.java |     1 +
 .../ConcurrentJobRequestsTestBase.java          |   231 +
 .../templeton/MockAnswerTestHelper.java         |    56 +
 .../templeton/TestConcurrentJobRequests.java    |    79 +
 .../TestConcurrentJobRequestsThreads.java       |   134 +
 ...tConcurrentJobRequestsThreadsAndTimeout.java |   374 +
 hplsql/pom.xml                                  |     2 +-
 .../main/java/org/apache/hive/hplsql/Udf.java   |    26 +-
 .../org/apache/hive/hplsql/TestHplsqlUdf.java   |    59 +
 itests/custom-serde/pom.xml                     |     2 +-
 itests/custom-udfs/pom.xml                      |     2 +-
 itests/custom-udfs/udf-classloader-udf1/pom.xml |     2 +-
 itests/custom-udfs/udf-classloader-udf2/pom.xml |     2 +-
 itests/custom-udfs/udf-classloader-util/pom.xml |     2 +-
 .../udf-vectorized-badexample/pom.xml           |     2 +-
 itests/hcatalog-unit/pom.xml                    |     2 +-
 .../listener/DummyRawStoreFailEvent.java        |     7 +
 .../listener/TestDbNotificationListener.java    |   190 +
 itests/hive-blobstore/pom.xml                   |     2 +-
 ...import_addpartition_blobstore_to_blobstore.q |    45 +
 .../import_addpartition_blobstore_to_local.q    |    44 +
 ...import_addpartition_blobstore_to_warehouse.q |    41 +
 .../import_addpartition_local_to_blobstore.q    |    44 +
 .../import_blobstore_to_blobstore.q             |    30 +
 .../import_blobstore_to_blobstore_nonpart.q     |    25 +
 .../clientpositive/import_blobstore_to_local.q  |    30 +
 .../import_blobstore_to_warehouse.q             |    28 +
 .../import_blobstore_to_warehouse_nonpart.q     |    23 +
 .../clientpositive/import_local_to_blobstore.q  |    31 +
 .../insert_blobstore_to_blobstore.q             |    29 +
 .../insert_empty_into_blobstore.q               |    53 +
 .../test/queries/clientpositive/orc_buckets.q   |    31 +
 .../queries/clientpositive/orc_format_nonpart.q |    30 +
 .../queries/clientpositive/orc_format_part.q    |    67 +
 .../clientpositive/orc_nonstd_partitions_loc.q  |   100 +
 .../queries/clientpositive/rcfile_buckets.q     |    31 +
 .../clientpositive/rcfile_format_nonpart.q      |    30 +
 .../queries/clientpositive/rcfile_format_part.q |    67 +
 .../rcfile_nonstd_partitions_loc.q              |   100 +
 .../clientpositive/zero_rows_blobstore.q        |    19 +
 .../queries/clientpositive/zero_rows_hdfs.q     |    18 +
 .../src/test/resources/hive-site.xml            |     5 +
 ...rt_addpartition_blobstore_to_blobstore.q.out |   283 +
 ...import_addpartition_blobstore_to_local.q.out |   283 +
 ...rt_addpartition_blobstore_to_warehouse.q.out |   271 +
 ...import_addpartition_local_to_blobstore.q.out |   277 +
 .../import_blobstore_to_blobstore.q.out         |   161 +
 .../import_blobstore_to_blobstore_nonpart.q.out |   103 +
 .../import_blobstore_to_local.q.out             |   161 +
 .../import_blobstore_to_warehouse.q.out         |   157 +
 .../import_blobstore_to_warehouse_nonpart.q.out |    99 +
 .../import_local_to_blobstore.q.out             |   159 +
 .../insert_blobstore_to_blobstore.q.out         |   110 +
 .../insert_empty_into_blobstore.q.out           |   155 +
 .../clientpositive/insert_into_table.q.out      |    27 +
 .../results/clientpositive/orc_buckets.q.out    |   183 +
 .../clientpositive/orc_format_nonpart.q.out     |   195 +
 .../clientpositive/orc_format_part.q.out        |   274 +
 .../orc_nonstd_partitions_loc.q.out             |   513 +
 .../results/clientpositive/rcfile_buckets.q.out |   183 +
 .../clientpositive/rcfile_format_nonpart.q.out  |   195 +
 .../clientpositive/rcfile_format_part.q.out     |   274 +
 .../rcfile_nonstd_partitions_loc.q.out          |   533 +
 .../write_final_output_blobstore.q.out          |    20 +
 .../clientpositive/zero_rows_blobstore.q.out    |    91 +
 .../results/clientpositive/zero_rows_hdfs.q.out |    89 +
 itests/hive-jmh/pom.xml                         |     2 +-
 itests/hive-minikdc/pom.xml                     |     2 +-
 .../apache/hive/minikdc/TestSSLWithMiniKdc.java |   101 +
 itests/hive-unit-hadoop2/pom.xml                |     2 +-
 .../hive/ql/security/TestExtendedAcls.java      |   166 -
 ...edMetastoreAuthorizationProviderWithACL.java |     1 -
 itests/hive-unit/pom.xml                        |     2 +-
 .../java/org/hadoop/hive/jdbc/SSLTestUtils.java |   103 +
 .../metastore/TestEmbeddedHiveMetaStore.java    |     2 -
 .../hive/metastore/TestHiveMetaStore.java       |    45 +-
 .../hive/metastore/TestHiveMetaStoreTxns.java   |     8 +-
 .../hive/metastore/TestMetastoreVersion.java    |     4 +-
 .../hive/metastore/TestReplChangeManager.java   |    12 +-
 .../hadoop/hive/ql/TestAutoPurgeTables.java     |   436 +
 ...estDDLWithRemoteMetastoreSecondNamenode.java |    31 +
 .../hadoop/hive/ql/TestLocationQueries.java     |     8 +-
 .../ql/TestMetaStoreLimitPartitionRequest.java  |   319 +
 .../hive/ql/TestReplicationScenarios.java       |   824 +-
 .../hive/ql/security/FolderPermissionBase.java  |   792 -
 .../hive/ql/security/TestFolderPermissions.java |    52 -
 ...StorageBasedMetastoreAuthorizationDrops.java |   205 -
 ...StorageBasedMetastoreAuthorizationReads.java |   127 -
 .../hive/ql/txn/compactor/TestCompactor.java    |    90 +-
 .../hive/beeline/TestBeeLineWithArgs.java       |    13 +
 .../org/apache/hive/beeline/TestSchemaTool.java |     9 +-
 .../org/apache/hive/jdbc/TestJdbcDriver2.java   |    48 +-
 .../apache/hive/jdbc/TestJdbcWithMiniHS2.java   |    44 +
 .../test/java/org/apache/hive/jdbc/TestSSL.java |   150 +-
 .../operation/TestOperationLoggingLayout.java   |    16 -
 .../service/cli/session/TestQueryDisplay.java   |    40 +-
 itests/pom.xml                                  |     2 +-
 itests/qtest-accumulo/pom.xml                   |     2 +-
 itests/qtest-spark/pom.xml                      |     2 +-
 itests/qtest/pom.xml                            |     2 +-
 .../hadoop/hive/cli/TestBeeLineDriver.java      |     4 +-
 .../test/resources/testconfiguration.properties |    44 +-
 itests/test-serde/pom.xml                       |     2 +-
 itests/util/pom.xml                             |     2 +-
 .../control/AbstractCoreBlobstoreCliDriver.java |    11 +-
 .../hadoop/hive/cli/control/CliConfigs.java     |     1 +
 .../hive/cli/control/CoreAccumuloCliDriver.java |    10 +-
 .../hive/cli/control/CoreBeeLineDriver.java     |    59 +-
 .../hadoop/hive/cli/control/CoreCliDriver.java  |    12 +-
 .../hive/cli/control/CoreCompareCliDriver.java  |    12 +-
 .../hive/cli/control/CoreHBaseCliDriver.java    |     9 +-
 .../cli/control/CoreHBaseNegativeCliDriver.java |     9 +-
 .../hive/cli/control/CoreNegativeCliDriver.java |    12 +-
 .../hive/cli/control/CorePerfCliDriver.java     |    12 +-
 .../hadoop/hive/ql/QTestProcessExecResult.java  |     6 +-
 .../org/apache/hadoop/hive/ql/QTestUtil.java    |   260 +-
 .../hadoop/hive/ql/parse/CoreParseNegative.java |    19 +-
 .../hive/beeline/ConvertedOutputFile.java       |    94 +
 .../org/apache/hive/beeline/Parallelized.java   |    64 +
 .../java/org/apache/hive/beeline/QFile.java     |   350 +
 .../apache/hive/beeline/QFileBeeLineClient.java |   162 +
 .../org/apache/hive/beeline/package-info.java   |    22 +
 .../org/apache/hive/beeline/qfile/QFile.java    |   273 -
 .../hive/beeline/qfile/QFileBeeLineClient.java  |   149 -
 .../apache/hive/beeline/qfile/package-info.java |    22 -
 jdbc-handler/pom.xml                            |     2 +-
 jdbc/pom.xml                                    |    28 +-
 .../org/apache/hive/jdbc/HiveConnection.java    |    24 +-
 .../org/apache/hive/jdbc/HiveStatement.java     |     6 +-
 .../hive/jdbc/logs/InPlaceUpdateStream.java     |    17 +
 llap-client/pom.xml                             |     2 +-
 .../apache/hadoop/hive/llap/io/api/LlapIo.java  |     1 +
 .../llap/registry/impl/LlapRegistryService.java |     5 +-
 llap-common/pom.xml                             |     2 +-
 .../apache/hadoop/hive/llap/LlapDaemonInfo.java |    92 +
 llap-ext-client/pom.xml                         |     2 +-
 llap-server/bin/runLlapDaemon.sh                |     4 +-
 llap-server/pom.xml                             |    10 +-
 .../llap/IncrementalObjectSizeEstimator.java    |     4 +-
 .../hadoop/hive/llap/cache/BuddyAllocator.java  |   181 +-
 .../hive/llap/cache/EvictionDispatcher.java     |    25 +-
 .../hive/llap/cache/LlapOomDebugDump.java       |     1 +
 .../hadoop/hive/llap/cache/LowLevelCache.java   |     2 +-
 .../hive/llap/cache/LowLevelCacheImpl.java      |    39 +
 .../llap/cache/LowLevelCacheMemoryManager.java  |    39 +-
 .../hive/llap/cache/LowLevelCachePolicy.java    |     2 +-
 .../llap/cache/LowLevelFifoCachePolicy.java     |    26 +-
 .../llap/cache/LowLevelLrfuCachePolicy.java     |    41 +-
 .../hadoop/hive/llap/cache/MemoryManager.java   |     4 +-
 .../hive/llap/cache/SerDeLowLevelCacheImpl.java |    78 +-
 .../hadoop/hive/llap/cache/SimpleAllocator.java |     5 +-
 .../hive/llap/cache/SimpleBufferManager.java    |    10 +
 .../hadoop/hive/llap/cli/LlapServiceDriver.java |    16 +-
 .../hadoop/hive/llap/cli/LlapSliderUtils.java   |    58 +
 .../llap/cli/LlapStatusOptionsProcessor.java    |     1 +
 .../hive/llap/cli/LlapStatusServiceDriver.java  |   751 +-
 .../hive/llap/cli/status/LlapStatusHelpers.java |   449 +
 .../configuration/LlapDaemonConfiguration.java  |     2 +-
 .../llap/daemon/impl/ContainerRunnerImpl.java   |     4 +
 .../impl/EvictingPriorityBlockingQueue.java     |     5 +
 .../hive/llap/daemon/impl/LlapDaemon.java       |    88 +-
 .../hive/llap/daemon/impl/LlapDaemonMXBean.java |     6 +
 .../hadoop/hive/llap/daemon/impl/Scheduler.java |     2 +
 .../llap/daemon/impl/TaskExecutorService.java   |    21 +-
 .../llap/daemon/impl/TaskRunnerCallable.java    |     1 +
 .../services/impl/LlapIoMemoryServlet.java      |    88 +
 .../daemon/services/impl/LlapWebServices.java   |     1 +
 .../hive/llap/io/api/impl/LlapIoImpl.java       |    60 +-
 .../llap/io/decode/OrcColumnVectorProducer.java |     6 +-
 .../llap/io/encoded/OrcEncodedDataReader.java   |    39 +-
 .../llap/io/encoded/SerDeEncodedDataReader.java |    22 +-
 .../hive/llap/io/metadata/OrcMetadataCache.java |    24 +-
 .../llap/metrics/LlapDaemonExecutorMetrics.java |     2 +-
 .../llap/shufflehandler/ShuffleHandler.java     |    37 +-
 .../resources/hive-webapps/llap/js/metrics.js   |     6 +-
 .../main/resources/llap-cli-log4j2.properties   |    25 +-
 .../resources/llap-daemon-log4j2.properties     |     6 +-
 .../hive/llap/cache/TestBuddyAllocator.java     |    12 +-
 .../hive/llap/cache/TestLowLevelCacheImpl.java  |     8 +-
 .../llap/cache/TestLowLevelLrfuCachePolicy.java |    19 +-
 .../hive/llap/cache/TestOrcMetadataCache.java   |    16 +-
 .../hive/llap/daemon/MiniLlapCluster.java       |     5 +
 llap-tez/pom.xml                                |     2 +-
 .../metrics/LlapTaskSchedulerMetrics.java       |     2 +-
 metastore/if/hive_metastore.thrift              |    10 +-
 metastore/pom.xml                               |    24 +-
 .../upgrade/derby/022-HIVE-11107.derby.sql      |     4 +-
 .../upgrade/derby/039-HIVE-12274.derby.sql      |    32 +
 .../upgrade/derby/040-HIVE-16399.derby.sql      |     1 +
 .../upgrade/derby/hive-schema-2.2.0.derby.sql   |    20 +-
 .../upgrade/derby/hive-schema-2.3.0.derby.sql   |   340 +
 .../upgrade/derby/hive-schema-3.0.0.derby.sql   |   340 +
 .../derby/hive-txn-schema-2.2.0.derby.sql       |     2 +-
 .../derby/hive-txn-schema-2.3.0.derby.sql       |   134 +
 .../derby/hive-txn-schema-3.0.0.derby.sql       |   134 +
 .../derby/upgrade-2.1.0-to-2.2.0.derby.sql      |     1 +
 .../derby/upgrade-2.2.0-to-2.3.0.derby.sql      |     4 +
 .../derby/upgrade-2.3.0-to-3.0.0.derby.sql      |     3 +
 .../scripts/upgrade/derby/upgrade.order.derby   |     2 +
 .../upgrade/mssql/024-HIVE-12274.mssql.sql      |    18 +
 .../upgrade/mssql/025-HIVE-16399.mssql.sql      |     1 +
 .../upgrade/mssql/hive-schema-2.2.0.mssql.sql   |    32 +-
 .../upgrade/mssql/hive-schema-2.3.0.mssql.sql   |  1023 +
 .../upgrade/mssql/hive-schema-3.0.0.mssql.sql   |  1023 +
 .../mssql/hive-txn-schema-0.14.0.mssql.sql      |     2 +-
 .../mssql/upgrade-2.1.0-to-2.2.0.mssql.sql      |     1 +
 .../mssql/upgrade-2.2.0-to-2.3.0.mssql.sql      |     6 +
 .../mssql/upgrade-2.3.0-to-3.0.0.mssql.sql      |     4 +
 .../scripts/upgrade/mssql/upgrade.order.mssql   |     2 +
 .../upgrade/mysql/039-HIVE-12274.mysql.sql      |    18 +
 .../upgrade/mysql/040-HIVE-16399.mysql.sql      |     1 +
 .../upgrade/mysql/hive-schema-2.2.0.mysql.sql   |    28 +-
 .../upgrade/mysql/hive-schema-2.3.0.mysql.sql   |   853 +
 .../upgrade/mysql/hive-schema-3.0.0.mysql.sql   |   853 +
 .../mysql/hive-txn-schema-2.2.0.mysql.sql       |     2 +-
 .../mysql/hive-txn-schema-2.3.0.mysql.sql       |   135 +
 .../mysql/hive-txn-schema-3.0.0.mysql.sql       |   135 +
 .../mysql/upgrade-2.1.0-to-2.2.0.mysql.sql      |     1 +
 .../mysql/upgrade-2.2.0-to-2.3.0.mysql.sql      |     7 +
 .../mysql/upgrade-2.3.0-to-3.0.0.mysql.sql      |     5 +
 .../scripts/upgrade/mysql/upgrade.order.mysql   |     2 +
 .../upgrade/oracle/039-HIVE-12274.oracle.sql    |    21 +
 .../upgrade/oracle/040-HIVE-16399.oracle.sql    |     1 +
 .../upgrade/oracle/hive-schema-2.2.0.oracle.sql |    30 +-
 .../upgrade/oracle/hive-schema-2.3.0.oracle.sql |   811 +
 .../upgrade/oracle/hive-schema-3.0.0.oracle.sql |   811 +
 .../oracle/hive-txn-schema-2.2.0.oracle.sql     |     2 +-
 .../oracle/hive-txn-schema-2.3.0.oracle.sql     |   133 +
 .../oracle/hive-txn-schema-3.0.0.oracle.sql     |   133 +
 .../oracle/upgrade-2.1.0-to-2.2.0.oracle.sql    |     1 +
 .../oracle/upgrade-2.2.0-to-2.3.0.oracle.sql    |     6 +
 .../oracle/upgrade-2.3.0-to-3.0.0.oracle.sql    |     4 +
 .../scripts/upgrade/oracle/upgrade.order.oracle |     2 +
 .../postgres/038-HIVE-12274.postgres.sql        |    18 +
 .../postgres/039-HIVE-16399.postgres.sql        |     1 +
 .../postgres/hive-schema-2.2.0.postgres.sql     |    30 +-
 .../postgres/hive-schema-2.3.0.postgres.sql     |  1478 +
 .../postgres/hive-schema-3.0.0.postgres.sql     |  1478 +
 .../postgres/hive-txn-schema-2.2.0.postgres.sql |     2 +-
 .../postgres/hive-txn-schema-2.3.0.postgres.sql |   133 +
 .../postgres/hive-txn-schema-3.0.0.postgres.sql |   133 +
 .../upgrade-2.1.0-to-2.2.0.postgres.sql         |     1 +
 .../upgrade-2.2.0-to-2.3.0.postgres.sql         |     7 +
 .../upgrade-2.3.0-to-3.0.0.postgres.sql         |     5 +
 .../upgrade/postgres/upgrade.order.postgres     |     2 +
 .../gen/thrift/gen-cpp/ThriftHiveMetastore.cpp  |  2499 +-
 .../gen/thrift/gen-cpp/ThriftHiveMetastore.h    |   139 +
 .../ThriftHiveMetastore_server.skeleton.cpp     |     5 +
 .../gen/thrift/gen-cpp/hive_metastore_types.cpp |  1550 +-
 .../gen/thrift/gen-cpp/hive_metastore_types.h   |    23 +-
 .../hive/metastore/api/GetOpenTxnsResponse.java |   150 +-
 .../metastore/api/InsertEventRequestData.java   |   127 +-
 .../hive/metastore/api/ThriftHiveMetastore.java |  3116 +-
 .../gen-php/metastore/ThriftHiveMetastore.php   |  1534 +-
 .../src/gen/thrift/gen-php/metastore/Types.php  |   584 +-
 .../hive_metastore/ThriftHiveMetastore-remote   |     7 +
 .../hive_metastore/ThriftHiveMetastore.py       |   948 +-
 .../gen/thrift/gen-py/hive_metastore/ttypes.py  |    60 +-
 .../gen/thrift/gen-rb/hive_metastore_types.rb   |    13 +-
 .../gen/thrift/gen-rb/thrift_hive_metastore.rb  |    62 +
 .../apache/hadoop/hive/metastore/Deadline.java  |    29 +-
 .../hadoop/hive/metastore/HiveAlterHandler.java |   444 +-
 .../hadoop/hive/metastore/HiveMetaStore.java    |   805 +-
 .../hive/metastore/HiveMetaStoreClient.java     |    68 +-
 .../hive/metastore/HiveMetaStoreFsImpl.java     |    21 +-
 .../hadoop/hive/metastore/IMetaStoreClient.java |    14 +
 .../hive/metastore/MetaStoreDirectSql.java      |   104 +-
 .../hive/metastore/MetaStoreEventListener.java  |    12 +-
 .../metastore/MetaStoreListenerNotifier.java    |   224 +
 .../hive/metastore/MetaStoreSchemaInfo.java     |    16 +-
 .../hadoop/hive/metastore/MetaStoreUtils.java   |    51 +-
 .../hadoop/hive/metastore/ObjectStore.java      |   836 +-
 .../apache/hadoop/hive/metastore/RawStore.java  |    12 +
 .../hive/metastore/RetryingHMSHandler.java      |     2 -
 .../hive/metastore/StatObjectConverter.java     |    42 +-
 .../apache/hadoop/hive/metastore/Warehouse.java |    81 +-
 .../hive/metastore/cache/ByteArrayWrapper.java  |    45 +
 .../hadoop/hive/metastore/cache/CacheUtils.java |   113 +
 .../hive/metastore/cache/CachedStore.java       |  1622 +
 .../hive/metastore/cache/SharedCache.java       |   356 +
 .../metastore/events/AlterPartitionEvent.java   |    14 +-
 .../hive/metastore/events/AlterTableEvent.java  |    12 +-
 .../hive/metastore/events/InsertEvent.java      |    11 +
 .../hive/metastore/events/ListenerEvent.java    |   106 +
 .../hadoop/hive/metastore/hbase/HBaseStore.java |    31 +-
 .../hadoop/hive/metastore/hbase/HBaseUtils.java |     2 +-
 .../messaging/AlterPartitionMessage.java        |     2 +
 .../metastore/messaging/AlterTableMessage.java  |     2 +
 .../hive/metastore/messaging/EventUtils.java    |    87 +-
 .../hive/metastore/messaging/InsertMessage.java |     6 +
 .../metastore/messaging/MessageFactory.java     |     9 +-
 .../metastore/messaging/PartitionFiles.java     |     3 +
 .../messaging/event/filters/AndFilter.java      |    39 +
 .../messaging/event/filters/BasicFilter.java    |    33 +
 .../event/filters/DatabaseAndTableFilter.java   |    52 +
 .../event/filters/EventBoundaryFilter.java      |    34 +
 .../event/filters/MessageFormatFilter.java      |    36 +
 .../json/JSONAlterPartitionMessage.java         |     9 +-
 .../messaging/json/JSONAlterTableMessage.java   |     9 +-
 .../messaging/json/JSONInsertMessage.java       |     9 +-
 .../messaging/json/JSONMessageDeserializer.java |     4 +
 .../messaging/json/JSONMessageFactory.java      |    18 +-
 .../hive/metastore/parser/ExpressionTree.java   |     2 +-
 .../hadoop/hive/metastore/txn/TxnHandler.java   |   115 +-
 .../hadoop/hive/metastore/txn/TxnUtils.java     |    17 +-
 metastore/src/model/package.jdo                 |    36 +-
 .../DummyRawStoreControlledCommit.java          |     8 +
 .../DummyRawStoreForJdoConnection.java          |     8 +
 .../metastore/TestHiveMetaStoreTimeout.java     |     1 -
 .../hadoop/hive/metastore/TestObjectStore.java  |    72 +-
 .../hive/metastore/VerifyingObjectStore.java    |     2 +-
 .../hive/metastore/cache/TestCachedStore.java   |   238 +
 .../json/JSONMessageDeserializerTest.java       |   106 +
 .../txn/TestValidCompactorTxnList.java          |    63 +-
 packaging/pom.xml                               |     2 +-
 packaging/src/main/assembly/src.xml             |     2 +-
 pom.xml                                         |   126 +-
 ql/pom.xml                                      |    25 +-
 .../UDAFTemplates/VectorUDAFAvg.txt             |     2 +-
 .../UDAFTemplates/VectorUDAFMinMax.txt          |     2 +-
 .../UDAFTemplates/VectorUDAFMinMaxDecimal.txt   |     2 +-
 .../VectorUDAFMinMaxIntervalDayTime.txt         |     2 +-
 .../UDAFTemplates/VectorUDAFMinMaxString.txt    |     4 +-
 .../UDAFTemplates/VectorUDAFMinMaxTimestamp.txt |     2 +-
 .../UDAFTemplates/VectorUDAFSum.txt             |     2 +-
 .../UDAFTemplates/VectorUDAFVar.txt             |     2 +-
 .../UDAFTemplates/VectorUDAFVarDecimal.txt      |     4 +-
 .../java/org/apache/hadoop/hive/ql/Context.java |    11 +-
 .../java/org/apache/hadoop/hive/ql/Driver.java  |   436 +-
 .../org/apache/hadoop/hive/ql/ErrorMsg.java     |    21 +-
 .../org/apache/hadoop/hive/ql/QueryInfo.java    |   102 +
 .../hadoop/hive/ql/QueryLifeTimeHookRunner.java |   186 +
 .../org/apache/hadoop/hive/ql/QueryPlan.java    |    28 +-
 .../hadoop/hive/ql/exec/ArchiveUtils.java       |     7 +-
 .../apache/hadoop/hive/ql/exec/ColumnInfo.java  |     7 +-
 .../hadoop/hive/ql/exec/ColumnStatsTask.java    |    32 +-
 .../apache/hadoop/hive/ql/exec/CopyTask.java    |     3 +-
 .../org/apache/hadoop/hive/ql/exec/DDLTask.java |   106 +-
 .../apache/hadoop/hive/ql/exec/ExplainTask.java |    11 +-
 .../exec/ExprNodeConstantDefaultEvaluator.java  |    55 -
 .../hive/ql/exec/ExprNodeEvaluatorFactory.java  |     6 -
 .../hadoop/hive/ql/exec/FetchOperator.java      |    16 +
 .../hadoop/hive/ql/exec/FileSinkOperator.java   |    18 +-
 .../hadoop/hive/ql/exec/FunctionRegistry.java   |    41 +-
 .../hadoop/hive/ql/exec/GroupByOperator.java    |     5 +-
 .../apache/hadoop/hive/ql/exec/MoveTask.java    |     9 +-
 .../hadoop/hive/ql/exec/OperatorFactory.java    |    20 +-
 .../hadoop/hive/ql/exec/ReplCopyTask.java       |     3 +-
 .../hive/ql/exec/SerializationUtilities.java    |    30 +
 .../hadoop/hive/ql/exec/StatsNoJobTask.java     |    15 +-
 .../apache/hadoop/hive/ql/exec/TaskRunner.java  |     7 -
 .../apache/hadoop/hive/ql/exec/TopNHash.java    |     5 +-
 .../apache/hadoop/hive/ql/exec/Utilities.java   |    34 +-
 .../mapjoin/MapJoinMemoryExhaustionError.java   |    28 +
 .../MapJoinMemoryExhaustionException.java       |    29 -
 .../mapjoin/MapJoinMemoryExhaustionHandler.java |     6 +-
 .../hadoop/hive/ql/exec/mr/ExecDriver.java      |    44 +-
 .../hadoop/hive/ql/exec/mr/MapredLocalTask.java |    19 +-
 .../persistence/BytesBytesMultiHashMap.java     |    17 +-
 .../ql/exec/persistence/HashMapWrapper.java     |    10 +-
 .../persistence/HybridHashTableContainer.java   |     5 +
 .../persistence/MapJoinBytesTableContainer.java |    58 +-
 .../exec/persistence/MapJoinTableContainer.java |     3 +-
 .../ql/exec/spark/RemoteHiveSparkClient.java    |     9 +-
 .../hive/ql/exec/spark/SparkPlanGenerator.java  |    23 +-
 .../ql/exec/spark/SparkReduceRecordHandler.java |     6 +-
 .../hadoop/hive/ql/exec/spark/SparkTask.java    |    27 +-
 .../hive/ql/exec/spark/SparkUtilities.java      |     4 +-
 .../spark/status/RemoteSparkJobMonitor.java     |    15 +-
 .../ql/exec/spark/status/SparkJobMonitor.java   |    10 +-
 .../spark/status/impl/RemoteSparkJobStatus.java |     6 +
 .../hadoop/hive/ql/exec/tez/DagUtils.java       |    38 +-
 .../hive/ql/exec/tez/HashTableLoader.java       |    42 +-
 .../hive/ql/exec/tez/ReduceRecordSource.java    |    11 +-
 .../hadoop/hive/ql/exec/tez/TezProcessor.java   |    11 +-
 .../hive/ql/exec/tez/TezSessionPoolManager.java |    21 +-
 .../hive/ql/exec/tez/TezSessionState.java       |     4 +
 .../apache/hadoop/hive/ql/exec/tez/TezTask.java |    18 +-
 .../hive/ql/exec/tez/monitoring/Constants.java  |    17 +
 .../hive/ql/exec/tez/monitoring/DAGSummary.java |    72 +-
 .../exec/tez/monitoring/FSCountersSummary.java  |    17 +
 .../ql/exec/tez/monitoring/LLAPioSummary.java   |    17 +
 .../ql/exec/tez/monitoring/PrintSummary.java    |    17 +
 .../QueryExecutionBreakdownSummary.java         |    17 +
 .../ql/exec/tez/monitoring/RenderStrategy.java  |    21 +-
 .../exec/tez/monitoring/TezProgressMonitor.java |    17 +
 .../vector/VectorAggregationBufferBatch.java    |     4 +-
 .../ql/exec/vector/VectorColumnSetInfo.java     |   158 +-
 .../hive/ql/exec/vector/VectorExtractRow.java   |    19 +
 .../ql/exec/vector/VectorGroupByOperator.java   |   164 +-
 .../ql/exec/vector/VectorGroupKeyHelper.java    |    57 +-
 .../ql/exec/vector/VectorHashKeyWrapper.java    |   244 +-
 .../exec/vector/VectorHashKeyWrapperBatch.java  |   456 +-
 .../ql/exec/vector/VectorMapJoinOperator.java   |     3 +
 .../exec/vector/VectorSMBMapJoinOperator.java   |     3 +
 .../ql/exec/vector/VectorizationContext.java    |    52 +-
 .../vector/expressions/CastStringToLong.java    |   271 +
 .../exec/vector/expressions/CuckooSetBytes.java |     4 +-
 .../ql/exec/vector/expressions/OctetLength.java |   149 +
 .../aggregates/VectorAggregateExpression.java   |     2 +-
 .../aggregates/VectorUDAFAvgDecimal.java        |     2 +-
 .../aggregates/VectorUDAFAvgTimestamp.java      |     2 +-
 .../aggregates/VectorUDAFBloomFilter.java       |     4 +-
 .../aggregates/VectorUDAFBloomFilterMerge.java  |     2 +-
 .../expressions/aggregates/VectorUDAFCount.java |     2 +-
 .../aggregates/VectorUDAFCountMerge.java        |     2 +-
 .../aggregates/VectorUDAFCountStar.java         |     2 +-
 .../aggregates/VectorUDAFStdPopTimestamp.java   |     2 +-
 .../aggregates/VectorUDAFStdSampTimestamp.java  |     2 +-
 .../aggregates/VectorUDAFSumDecimal.java        |     2 +-
 .../aggregates/VectorUDAFVarPopTimestamp.java   |     2 +-
 .../aggregates/VectorUDAFVarSampTimestamp.java  |     2 +-
 .../VectorMapJoinGenerateResultOperator.java    |     2 +
 .../fast/VectorMapJoinFastBytesHashMap.java     |     5 +
 .../VectorMapJoinFastBytesHashMultiSet.java     |     5 +
 .../fast/VectorMapJoinFastBytesHashSet.java     |     5 +
 .../fast/VectorMapJoinFastBytesHashTable.java   |     6 +
 .../fast/VectorMapJoinFastHashTable.java        |    13 +-
 .../fast/VectorMapJoinFastHashTableLoader.java  |    47 +-
 .../mapjoin/fast/VectorMapJoinFastKeyStore.java |    11 +-
 .../fast/VectorMapJoinFastLongHashMap.java      |     9 +-
 .../fast/VectorMapJoinFastLongHashMultiSet.java |     5 +
 .../fast/VectorMapJoinFastLongHashSet.java      |     5 +
 .../fast/VectorMapJoinFastLongHashTable.java    |    15 +
 .../fast/VectorMapJoinFastMultiKeyHashMap.java  |     5 +
 .../VectorMapJoinFastMultiKeyHashMultiSet.java  |     4 +
 .../fast/VectorMapJoinFastMultiKeyHashSet.java  |     5 +-
 .../fast/VectorMapJoinFastStringHashMap.java    |     9 +
 .../VectorMapJoinFastStringHashMultiSet.java    |     8 +
 .../fast/VectorMapJoinFastStringHashSet.java    |     8 +
 .../fast/VectorMapJoinFastTableContainer.java   |    16 +-
 .../fast/VectorMapJoinFastValueStore.java       |     8 +-
 .../hashtable/VectorMapJoinHashTable.java       |     3 +-
 .../VectorMapJoinOptimizedHashSet.java          |     5 +
 .../VectorMapJoinOptimizedHashTable.java        |     9 +
 .../VectorMapJoinOptimizedStringHashSet.java    |     8 +
 .../VectorReduceSinkCommonOperator.java         |   208 +-
 .../VectorReduceSinkLongOperator.java           |     2 +-
 .../VectorReduceSinkMultiKeyOperator.java       |     2 +-
 .../VectorReduceSinkObjectHashOperator.java     |   288 +
 .../VectorReduceSinkStringOperator.java         |     2 +-
 .../VectorReduceSinkUniformHashOperator.java    |   218 +
 .../ql/exec/vector/udf/VectorUDFAdaptor.java    |    23 +
 .../hadoop/hive/ql/history/HiveHistoryImpl.java |     5 +-
 .../hadoop/hive/ql/hooks/HookContext.java       |    10 +-
 .../apache/hadoop/hive/ql/hooks/HookUtils.java  |    52 +-
 .../hadoop/hive/ql/hooks/HooksLoader.java       |   107 +
 .../hadoop/hive/ql/hooks/LineageInfo.java       |     3 +-
 .../hooks/PostExecOrcRowGroupCountPrinter.java  |     4 +-
 .../ql/hooks/PostExecTezSummaryPrinter.java     |    12 +-
 .../hive/ql/hooks/PostExecutePrinter.java       |     6 +-
 .../hadoop/hive/ql/hooks/PreExecutePrinter.java |     6 +-
 .../ql/hooks/QueryLifeTimeHookContextImpl.java  |    34 +-
 .../hooks/QueryLifeTimeHookWithParseHooks.java  |    41 +
 .../apache/hadoop/hive/ql/index/HiveIndex.java  |     4 +-
 .../hive/ql/io/CombineHiveInputFormat.java      |     7 +
 .../hadoop/hive/ql/io/HiveFileFormatUtils.java  |     2 +-
 .../hadoop/hive/ql/io/HiveInputFormat.java      |     3 +-
 .../org/apache/hadoop/hive/ql/io/RCFile.java    |     2 +-
 .../apache/hadoop/hive/ql/io/orc/OrcFile.java   |     4 +-
 .../hadoop/hive/ql/io/orc/OrcInputFormat.java   |     8 +-
 .../ql/io/orc/encoded/EncodedReaderImpl.java    |   216 +-
 .../io/parquet/MapredParquetOutputFormat.java   |    10 +-
 .../ql/io/parquet/ParquetRecordReaderBase.java  |    14 +-
 .../ql/io/parquet/timestamp/NanoTimeUtils.java  |    15 +-
 .../vector/VectorizedParquetRecordReader.java   |     5 +-
 .../ql/io/rcfile/stats/PartialScanTask.java     |     6 +-
 .../hadoop/hive/ql/lockmgr/DbTxnManager.java    |   282 +-
 .../hadoop/hive/ql/lockmgr/DummyTxnManager.java |     3 +-
 .../hadoop/hive/ql/lockmgr/HiveLockObject.java  |    18 +-
 .../hadoop/hive/ql/lockmgr/HiveTxnManager.java  |    16 +-
 .../hive/ql/lockmgr/HiveTxnManagerImpl.java     |    25 +-
 .../zookeeper/ZooKeeperHiveLockManager.java     |    39 +-
 .../hadoop/hive/ql/log/LogDivertAppender.java   |   249 +
 .../hive/ql/log/LogDivertAppenderForTest.java   |   182 +
 .../apache/hadoop/hive/ql/metadata/Hive.java    |   274 +-
 .../metadata/HiveMaterializedViewsRegistry.java |     3 +-
 .../hive/ql/metadata/HiveMetaStoreChecker.java  |   102 +-
 .../ql/metadata/SessionHiveMetaStoreClient.java |     2 +-
 .../apache/hadoop/hive/ql/metadata/Table.java   |    24 +-
 .../hadoop/hive/ql/metadata/VirtualColumn.java  |     2 +-
 .../hadoop/hive/ql/optimizer/ColumnPruner.java  |    13 +-
 .../ql/optimizer/ColumnPrunerProcFactory.java   |    36 +-
 .../hive/ql/optimizer/ConvertJoinMapJoin.java   |    86 +-
 .../DynamicPartitionPruningOptimization.java    |   207 +-
 .../hive/ql/optimizer/GenMRFileSink1.java       |    11 +-
 .../hive/ql/optimizer/GenMapRedUtils.java       |     6 +-
 .../hive/ql/optimizer/MapJoinProcessor.java     |     8 +-
 .../hadoop/hive/ql/optimizer/Optimizer.java     |     2 +-
 .../ql/optimizer/SetReducerParallelism.java     |     2 +
 .../calcite/CalciteSemanticException.java       |     5 +-
 .../ql/optimizer/calcite/HiveCalciteUtil.java   |     6 +
 .../optimizer/calcite/HivePlannerContext.java   |     9 +-
 .../optimizer/calcite/HiveRelShuttleImpl.java   |     5 +
 .../calcite/reloperators/HiveExtractDate.java   |     8 +-
 .../HiveDruidProjectFilterTransposeRule.java    |    48 +
 .../calcite/rules/HiveFilterJoinRule.java       |    51 -
 .../rules/HivePointLookupOptimizerRule.java     |    95 +-
 .../rules/HiveProjectSortTransposeRule.java     |    20 +-
 .../rules/HiveSortProjectTransposeRule.java     |    48 +-
 .../calcite/rules/HiveSubQueryRemoveRule.java   |   194 +-
 .../HiveMaterializedViewFilterScanRule.java     |     3 +-
 .../stats/FilterSelectivityEstimator.java       |    12 +
 .../calcite/stats/HiveRelMdPredicates.java      |    14 +-
 .../calcite/translator/ASTBuilder.java          |    14 +-
 .../calcite/translator/ASTConverter.java        |     1 -
 .../calcite/translator/ExprNodeConverter.java   |    18 +-
 .../calcite/translator/HiveOpConverter.java     |    21 +-
 .../calcite/translator/RexNodeConverter.java    |    83 +-
 .../translator/SqlFunctionConverter.java        |     5 +-
 .../correlation/ReduceSinkDeDuplication.java    |     2 +-
 .../ListBucketingPrunerUtils.java               |     4 +-
 .../physical/GenMRSkewJoinProcessor.java        |    13 +-
 .../physical/GenSparkSkewJoinProcessor.java     |     3 +-
 .../physical/LlapClusterStateForCompile.java    |   132 +
 .../hive/ql/optimizer/physical/LlapDecider.java |    56 +-
 .../physical/LlapPreVectorizationPass.java      |   128 +
 .../optimizer/physical/NullScanOptimizer.java   |    58 +-
 .../hive/ql/optimizer/physical/Vectorizer.java  |   334 +-
 .../ql/optimizer/physical/VectorizerReason.java |     2 +-
 .../hive/ql/optimizer/ppr/PartitionPruner.java  |     3 +-
 .../optimizer/spark/SparkMapJoinOptimizer.java  |    73 +-
 .../stats/annotation/StatsRulesProcFactory.java |    16 +-
 .../hive/ql/parse/BaseSemanticAnalyzer.java     |     8 +-
 .../hadoop/hive/ql/parse/CalcitePlanner.java    |   221 +-
 .../ql/parse/ColumnStatsSemanticAnalyzer.java   |    19 +-
 .../hive/ql/parse/DDLSemanticAnalyzer.java      |    31 +-
 .../apache/hadoop/hive/ql/parse/EximUtil.java   |   256 +-
 .../hive/ql/parse/ExplainSemanticAnalyzer.java  |    22 +-
 .../hive/ql/parse/ExportSemanticAnalyzer.java   |     4 +-
 .../hadoop/hive/ql/parse/FromClauseParser.g     |     3 +-
 .../hive/ql/parse/FunctionSemanticAnalyzer.java |     8 +-
 .../hadoop/hive/ql/parse/GenTezUtils.java       |    33 +-
 .../apache/hadoop/hive/ql/parse/GenTezWork.java |     3 +-
 .../apache/hadoop/hive/ql/parse/HintParser.g    |     4 +
 .../org/apache/hadoop/hive/ql/parse/HiveLexer.g |     3 +-
 .../apache/hadoop/hive/ql/parse/HiveParser.g    |     2 -
 .../hadoop/hive/ql/parse/IdentifiersParser.g    |    38 +-
 .../hive/ql/parse/ImportSemanticAnalyzer.java   |   101 +-
 .../hadoop/hive/ql/parse/NamedJoinInfo.java     |    65 +
 .../hadoop/hive/ql/parse/ParseContext.java      |    44 +-
 .../apache/hadoop/hive/ql/parse/ParseUtils.java |     1 +
 .../hive/ql/parse/ProcessAnalyzeTable.java      |     6 +-
 .../apache/hadoop/hive/ql/parse/QBJoinTree.java |    16 +
 .../hadoop/hive/ql/parse/QBParseInfo.java       |     9 +
 .../ql/parse/ReplicationSemanticAnalyzer.java   |   911 +-
 .../hadoop/hive/ql/parse/ReplicationSpec.java   |    22 +-
 .../hadoop/hive/ql/parse/RowResolver.java       |    10 +
 .../hadoop/hive/ql/parse/RuntimeValuesInfo.java |    10 +
 .../hadoop/hive/ql/parse/SemanticAnalyzer.java  |   337 +-
 .../hive/ql/parse/SemanticAnalyzerFactory.java  |    23 +-
 .../hive/ql/parse/SemiJoinBranchInfo.java       |    45 +
 .../hadoop/hive/ql/parse/SemiJoinHint.java      |    43 +
 .../hadoop/hive/ql/parse/SubQueryUtils.java     |    23 +-
 .../hadoop/hive/ql/parse/TaskCompiler.java      |     6 +-
 .../hadoop/hive/ql/parse/TezCompiler.java       |   512 +-
 .../hive/ql/parse/TypeCheckProcFactory.java     |     8 +-
 .../ql/parse/UpdateDeleteSemanticAnalyzer.java  |    14 +
 .../hadoop/hive/ql/parse/WindowingSpec.java     |     2 +-
 .../hadoop/hive/ql/parse/repl/DumpType.java     |    46 +
 .../dump/BootStrapReplicationSpecFunction.java  |    54 +
 .../hive/ql/parse/repl/dump/HiveWrapper.java    |    73 +
 .../hadoop/hive/ql/parse/repl/dump/Utils.java   |    50 +
 .../parse/repl/dump/events/AbstractHandler.java |    46 +
 .../repl/dump/events/AddPartitionHandler.java   |   114 +
 .../repl/dump/events/AlterPartitionHandler.java |   112 +
 .../repl/dump/events/AlterTableHandler.java     |   102 +
 .../repl/dump/events/CreateFunctionHandler.java |    36 +
 .../repl/dump/events/CreateTableHandler.java    |    86 +
 .../parse/repl/dump/events/DefaultHandler.java  |    44 +
 .../repl/dump/events/DropPartitionHandler.java  |    44 +
 .../repl/dump/events/DropTableHandler.java      |    44 +
 .../ql/parse/repl/dump/events/EventHandler.java |    62 +
 .../repl/dump/events/EventHandlerFactory.java   |    76 +
 .../parse/repl/dump/events/InsertHandler.java   |   110 +
 .../ql/parse/repl/dump/io/DBSerializer.java     |    55 +
 .../parse/repl/dump/io/FunctionSerializer.java  |    49 +
 .../hive/ql/parse/repl/dump/io/JsonWriter.java  |    55 +
 .../parse/repl/dump/io/PartitionSerializer.java |    65 +
 .../repl/dump/io/ReplicationSpecSerializer.java |    36 +
 .../ql/parse/repl/dump/io/TableSerializer.java  |   114 +
 .../dump/io/VersionCompatibleSerializer.java    |    37 +
 .../hive/ql/parse/repl/load/DumpMetaData.java   |   143 +
 .../hive/ql/parse/repl/load/MetaData.java       |    64 +
 .../hive/ql/parse/repl/load/MetadataJson.java   |   128 +
 .../load/message/AbstractMessageHandler.java    |    67 +
 .../parse/repl/load/message/DefaultHandler.java |    33 +
 .../repl/load/message/DropPartitionHandler.java |   108 +
 .../repl/load/message/DropTableHandler.java     |    51 +
 .../parse/repl/load/message/InsertHandler.java  |    47 +
 .../parse/repl/load/message/MessageHandler.java |    91 +
 .../load/message/MessageHandlerFactory.java     |    79 +
 .../load/message/RenamePartitionHandler.java    |    74 +
 .../repl/load/message/RenameTableHandler.java   |    81 +
 .../parse/repl/load/message/TableHandler.java   |    68 +
 .../load/message/TruncatePartitionHandler.java  |    69 +
 .../repl/load/message/TruncateTableHandler.java |    50 +
 .../parse/spark/SparkProcessAnalyzeTable.java   |     5 +-
 .../hadoop/hive/ql/plan/AbstractVectorDesc.java |     4 +-
 .../hadoop/hive/ql/plan/AlterTableDesc.java     |     6 +
 .../hadoop/hive/ql/plan/CreateTableDesc.java    |     3 +-
 .../hadoop/hive/ql/plan/CreateViewDesc.java     |    22 +
 .../ql/plan/ExprNodeConstantDefaultDesc.java    |    86 -
 .../hive/ql/plan/ExprNodeConstantDesc.java      |     6 +-
 .../hadoop/hive/ql/plan/ExprNodeDescUtils.java  |    71 +
 .../hive/ql/plan/ExprNodeDynamicListDesc.java   |     9 +-
 .../apache/hadoop/hive/ql/plan/GroupByDesc.java |     2 +-
 .../hadoop/hive/ql/plan/HiveOperation.java      |    37 +-
 .../hadoop/hive/ql/plan/ImportTableDesc.java    |   321 +
 .../apache/hadoop/hive/ql/plan/JoinDesc.java    |    18 +-
 .../apache/hadoop/hive/ql/plan/MapJoinDesc.java |    18 +-
 .../hadoop/hive/ql/plan/PartitionDesc.java      |    28 +-
 .../hadoop/hive/ql/plan/ReduceSinkDesc.java     |    33 +-
 .../apache/hadoop/hive/ql/plan/ReduceWork.java  |    67 +
 .../apache/hadoop/hive/ql/plan/SparkWork.java   |    10 +-
 .../apache/hadoop/hive/ql/plan/TableDesc.java   |     4 +-
 .../hadoop/hive/ql/plan/TezEdgeProperty.java    |    18 +-
 .../hive/ql/plan/VectorAppMasterEventDesc.java  |     2 +-
 .../hadoop/hive/ql/plan/VectorFileSinkDesc.java |     2 +-
 .../hadoop/hive/ql/plan/VectorFilterDesc.java   |     2 +-
 .../hadoop/hive/ql/plan/VectorGroupByDesc.java  |     2 +-
 .../hadoop/hive/ql/plan/VectorLimitDesc.java    |     2 +-
 .../hadoop/hive/ql/plan/VectorMapJoinDesc.java  |     2 +-
 .../hadoop/hive/ql/plan/VectorMapJoinInfo.java  |     2 +-
 .../hive/ql/plan/VectorPartitionDesc.java       |     2 +-
 .../hive/ql/plan/VectorReduceSinkDesc.java      |    33 +-
 .../hive/ql/plan/VectorReduceSinkInfo.java      |    98 +-
 .../hadoop/hive/ql/plan/VectorSMBJoinDesc.java  |     2 +-
 .../hadoop/hive/ql/plan/VectorSelectDesc.java   |     2 +-
 .../ql/plan/VectorSparkHashTableSinkDesc.java   |     2 +-
 .../VectorSparkPartitionPruningSinkDesc.java    |     2 +-
 .../hive/ql/plan/VectorTableScanDesc.java       |     2 +-
 .../hive/ql/ppd/SyntheticJoinPredicate.java     |     2 -
 .../ql/processors/CommandProcessorResponse.java |    10 +-
 .../hadoop/hive/ql/processors/HiveCommand.java  |     2 +-
 .../hive/ql/processors/ResetProcessor.java      |    21 +-
 .../hadoop/hive/ql/processors/SetProcessor.java |    15 +-
 .../plugin/sqlstd/SQLAuthorizationUtils.java    |     4 +-
 .../hadoop/hive/ql/session/OperationLog.java    |   127 +-
 .../hadoop/hive/ql/session/SessionState.java    |    25 +-
 .../apache/hadoop/hive/ql/stats/StatsUtils.java |    47 +-
 .../hadoop/hive/ql/txn/compactor/Cleaner.java   |     3 +-
 .../hive/ql/txn/compactor/CompactorMR.java      |    10 +-
 .../org/apache/hadoop/hive/ql/udf/UDFJson.java  |    67 +-
 .../apache/hadoop/hive/ql/udf/UDFLength.java    |    66 -
 .../apache/hadoop/hive/ql/udf/UDFToBoolean.java |     3 +-
 .../apache/hadoop/hive/ql/udf/UDFToByte.java    |     3 +-
 .../apache/hadoop/hive/ql/udf/UDFToInteger.java |     3 +-
 .../apache/hadoop/hive/ql/udf/UDFToLong.java    |     3 +-
 .../apache/hadoop/hive/ql/udf/UDFToShort.java   |     3 +-
 .../generic/GenericUDAFBinarySetFunctions.java  |   452 +
 .../ql/udf/generic/GenericUDAFBloomFilter.java  |    33 +
 .../ql/udf/generic/GenericUDAFComputeStats.java |    22 +-
 .../ql/udf/generic/GenericUDAFCorrelation.java  |    26 +-
 .../generic/GenericUDAFCovarianceSample.java    |    18 +-
 .../hadoop/hive/ql/udf/generic/GenericUDF.java  |     3 +-
 .../udf/generic/GenericUDFCharacterLength.java  |   120 +
 .../ql/udf/generic/GenericUDFExtractUnion.java  |   272 +
 .../hive/ql/udf/generic/GenericUDFGrouping.java |    45 +-
 .../hive/ql/udf/generic/GenericUDFInFile.java   |     3 +-
 .../udf/generic/GenericUDFInternalInterval.java |     4 +-
 .../hive/ql/udf/generic/GenericUDFLength.java   |   129 +
 .../hive/ql/udf/generic/GenericUDFLikeAll.java  |   133 +
 .../hive/ql/udf/generic/GenericUDFLikeAny.java  |   134 +
 .../hive/ql/udf/generic/GenericUDFNullif.java   |    10 +-
 .../hive/ql/udf/generic/GenericUDFOPEqual.java  |     9 -
 .../hive/ql/udf/generic/GenericUDFOPFalse.java  |    65 +
 .../ql/udf/generic/GenericUDFOPNotEqual.java    |     9 -
 .../ql/udf/generic/GenericUDFOPNotFalse.java    |    65 +
 .../ql/udf/generic/GenericUDFOPNotTrue.java     |    65 +
 .../hive/ql/udf/generic/GenericUDFOPTrue.java   |    65 +
 .../ql/udf/generic/GenericUDFOctetLength.java   |   114 +
 .../hive/ql/udf/generic/GenericUDFTrunc.java    |    10 +-
 .../ql/udf/generic/GenericUDFWidthBucket.java   |   329 +
 .../hive/metastore/txn/TestTxnHandler.java      |     2 +-
 .../org/apache/hadoop/hive/ql/TestErrorMsg.java |     6 +-
 .../apache/hadoop/hive/ql/TestTxnCommands.java  |    49 +-
 .../apache/hadoop/hive/ql/TestTxnCommands2.java |    24 +-
 .../ql/TestTxnCommands2WithSplitUpdate.java     |    61 +-
 .../hadoop/hive/ql/exec/TestOperators.java      |    72 +-
 .../TestMapJoinMemoryExhaustionHandler.java     |     4 +-
 .../tez/monitoring/TestTezProgressMonitor.java  |    17 +
 .../hive/ql/exec/vector/TestVectorSerDeRow.java |     5 +-
 .../mapjoin/fast/CheckFastRowHashMap.java       |    17 +-
 .../mapjoin/fast/CommonFastHashTable.java       |     4 +-
 .../hadoop/hive/ql/hooks/TestQueryHooks.java    |   162 +-
 .../hadoop/hive/ql/io/orc/TestOrcFile.java      |    17 +-
 .../hive/ql/io/orc/TestOrcRawRecordMerger.java  |     7 +-
 .../io/parquet/timestamp/TestNanoTimeUtils.java |    13 +
 .../hive/ql/lockmgr/TestDbTxnManager.java       |    65 +-
 .../hive/ql/lockmgr/TestDbTxnManager2.java      |   376 +-
 .../hive/ql/lockmgr/TestDummyTxnManager.java    |     4 +-
 .../ql/lockmgr/TestEmbeddedLockManager.java     |     4 +-
 .../hive/ql/lockmgr/TestHiveLockObject.java     |    30 +-
 .../zookeeper/TestZookeeperLockManager.java     |     2 +-
 .../hadoop/hive/ql/metadata/TestHive.java       |     2 +-
 .../ql/metadata/TestHiveMetaStoreChecker.java   |   207 +-
 .../calcite/TestCBORuleFiredOnlyOnce.java       |     2 +-
 .../hive/ql/parse/TestHiveDecimalParse.java     |    23 +-
 .../apache/hadoop/hive/ql/parse/TestIUD.java    |     8 +-
 .../hive/ql/parse/TestMergeStatement.java       |     4 +-
 .../parse/TestReplicationSemanticAnalyzer.java  |    22 +-
 .../ql/parse/repl/dump/HiveWrapperTest.java     |    27 +
 .../dump/events/TestEventHandlerFactory.java    |    62 +
 .../hive/ql/processors/TestResetProcessor.java  |    59 +
 .../hive/ql/txn/compactor/TestInitiator.java    |    11 +-
 .../TestGenericUDAFBinarySetFunctions.java      |   414 +
 .../udf/generic/TestGenericUDFExtractUnion.java |   175 +
 ...UDFExtractUnionObjectInspectorConverter.java |   109 +
 ...estGenericUDFExtractUnionValueConverter.java |   108 +
 .../ql/udf/generic/TestGenericUDFLikeAll.java   |    88 +
 .../ql/udf/generic/TestGenericUDFLikeAny.java   |    87 +
 .../ql/udf/generic/TestGenericUDFNullif.java    |    20 +
 .../ql/udf/generic/TestGenericUDFTrunc.java     |   283 +
 .../udf/generic/TestGenericUDFWidthBucket.java  |    86 +
 .../clientnegative/bucket_mapjoin_mismatch1.q   |     1 +
 .../bucket_mapjoin_wrong_table_metadata_1.q     |     2 +-
 .../bucket_mapjoin_wrong_table_metadata_2.q     |     1 +
 .../clientnegative/char_pad_convert_fail0.q     |     4 +-
 .../clientnegative/char_pad_convert_fail1.q     |     4 +-
 .../clientnegative/char_pad_convert_fail2.q     |     4 +-
 .../clientnegative/char_pad_convert_fail3.q     |     4 +-
 ...umnstats_partlvl_invalid_values_autogather.q |    17 +
 .../queries/clientnegative/decimal_precision.q  |     4 +-
 .../clientnegative/decimal_precision_1.q        |     4 +-
 .../distinct_windowing_failure1.q               |     2 +-
 .../distinct_windowing_failure2.q               |     2 +-
 .../drop_default_partition_filter.q             |     7 +
 .../queries/clientnegative/invalid_mapjoin1.q   |     1 +
 ql/src/test/queries/clientnegative/join2.q      |     1 +
 ql/src/test/queries/clientnegative/join28.q     |     2 +-
 ql/src/test/queries/clientnegative/join29.q     |     2 +-
 ql/src/test/queries/clientnegative/join32.q     |     1 +
 ql/src/test/queries/clientnegative/join35.q     |     2 +-
 .../test/queries/clientnegative/msck_repair_4.q |    14 +
 .../queries/clientnegative/nvl_mismatch_type.q  |     2 +-
 .../queries/clientnegative/smb_bucketmapjoin.q  |     2 +-
 .../queries/clientnegative/smb_mapjoin_14.q     |     2 +-
 .../sortmerge_mapjoin_mismatch_1.q              |     3 +-
 .../clientnegative/spark_job_max_tasks.q        |     6 +
 .../subquery_scalar_corr_multi_rows.q           |     2 +
 .../subquery_select_complex_expr.q              |     3 -
 .../clientnegative/subquery_select_no_source.q  |     2 +
 .../clientnegative/subquery_select_udf.q        |     2 -
 .../clientnegative/subquery_with_or_cond.q      |     5 -
 .../queries/clientnegative/udf_likeall_wrong1.q |     2 +
 .../queries/clientnegative/udf_likeany_wrong1.q |     2 +
 ql/src/test/queries/clientnegative/union22.q    |     2 +-
 .../clientpositive/alter_table_column_stats.q   |   241 +
 .../alter_table_invalidate_column_stats.q       |   153 -
 .../annotate_stats_deep_filters.q               |     2 +-
 .../clientpositive/auto_sortmerge_join_11.q     |     2 +-
 .../test/queries/clientpositive/avro_decimal.q  |    14 +-
 .../clientpositive/avro_decimal_native.q        |    14 +-
 .../queries/clientpositive/bucket_map_join_1.q  |     2 +-
 .../queries/clientpositive/bucket_map_join_2.q  |     2 +-
 .../queries/clientpositive/bucketcontext_1.q    |     2 +-
 .../queries/clientpositive/bucketcontext_2.q    |     2 +-
 .../queries/clientpositive/bucketcontext_3.q    |     2 +-
 .../queries/clientpositive/bucketcontext_4.q    |     2 +-
 .../queries/clientpositive/bucketcontext_5.q    |     2 +-
 .../queries/clientpositive/bucketcontext_6.q    |     2 +-
 .../queries/clientpositive/bucketcontext_7.q    |     2 +-
 .../queries/clientpositive/bucketcontext_8.q    |     2 +-
 .../queries/clientpositive/bucketmapjoin10.q    |     2 +-
 .../queries/clientpositive/bucketmapjoin11.q    |     2 +-
 .../queries/clientpositive/bucketmapjoin12.q    |     2 +-
 .../queries/clientpositive/bucketmapjoin13.q    |     2 +-
 .../queries/clientpositive/bucketmapjoin5.q     |     2 +-
 .../queries/clientpositive/bucketmapjoin7.q     |     1 +
 .../queries/clientpositive/bucketmapjoin8.q     |     2 +-
 .../queries/clientpositive/bucketmapjoin9.q     |     1 +
 .../clientpositive/bucketmapjoin_negative.q     |     1 +
 .../clientpositive/bucketmapjoin_negative2.q    |     2 +-
 .../clientpositive/bucketmapjoin_negative3.q    |     2 +-
 ql/src/test/queries/clientpositive/cbo_rp_gby.q |     1 -
 .../test/queries/clientpositive/cbo_rp_join.q   |     1 -
 .../test/queries/clientpositive/cbo_rp_limit.q  |     1 -
 .../queries/clientpositive/cbo_rp_semijoin.q    |     1 -
 .../clientpositive/cbo_rp_unionDistinct_2.q     |     1 +
 .../test/queries/clientpositive/cbo_rp_views.q  |     7 +-
 .../queries/clientpositive/cbo_rp_windowing_2.q |     6 +-
 .../queries/clientpositive/char_pad_convert.q   |    12 +-
 ...umn_names_with_leading_and_trailing_spaces.q |     5 +
 .../column_pruner_multiple_children.q           |    19 +
 .../clientpositive/columnstats_infinity.q       |    44 +
 ql/src/test/queries/clientpositive/comments.q   |     4 +-
 .../clientpositive/correlated_join_keys.q       |    34 +
 .../clientpositive/create_with_constraints.q    |    24 +-
 .../test/queries/clientpositive/decimal_10_0.q  |     6 +-
 .../queries/clientpositive/decimal_precision.q  |    28 +-
 .../queries/clientpositive/distinct_windowing.q |     2 +-
 .../clientpositive/distinct_windowing_no_cbo.q  |     2 +-
 .../clientpositive/drop_partitions_filter4.q    |    10 +
 .../test/queries/clientpositive/druid_basic2.q  |    24 +
 .../clientpositive/dynamic_partition_pruning.q  |     3 +-
 .../clientpositive/dynamic_semijoin_reduction.q |    20 +-
 .../dynamic_semijoin_reduction_2.q              |     3 +
 .../dynamic_semijoin_reduction_3.q              |    79 +
 .../dynamic_semijoin_user_level.q               |   107 +
 .../dynpart_sort_opt_vectorization.q            |     2 +-
 .../clientpositive/dynpart_sort_optimization.q  |     2 +-
 .../encryption_auto_purge_tables.q              |    38 +
 .../clientpositive/encryption_drop_partition.q  |     7 +-
 .../clientpositive/encryption_drop_table.q      |    12 +-
 .../encryption_drop_table_in_encrypted_db.q     |    20 +
 .../clientpositive/encryption_move_tbl.q        |     8 +-
 .../test/queries/clientpositive/explainuser_4.q |     1 +
 .../clientpositive/fp_literal_arithmetic.q      |    57 +
 .../clientpositive/groupby_grouping_id1.q       |     2 +
 .../clientpositive/groupby_grouping_id2.q       |     4 +
 .../clientpositive/groupby_grouping_id3.q       |    12 +
 .../clientpositive/groupby_grouping_sets1.q     |    20 +
 .../clientpositive/groupby_grouping_sets2.q     |     4 +
 .../clientpositive/groupby_grouping_sets3.q     |     6 +
 .../clientpositive/groupby_grouping_sets4.q     |     1 +
 .../clientpositive/groupby_grouping_sets5.q     |     2 +
 .../clientpositive/groupby_grouping_sets6.q     |     2 +
 .../groupby_grouping_sets_grouping.q            |    39 +
 .../groupby_grouping_sets_limit.q               |     3 +
 .../infer_bucket_sort_map_operators.q           |     1 +
 .../queries/clientpositive/inputwherefalse.q    |    19 +
 .../queries/clientpositive/is_distinct_from.q   |    46 +
 .../test/queries/clientpositive/jdbc_handler.q  |     1 +
 ql/src/test/queries/clientpositive/join25.q     |     2 +-
 ql/src/test/queries/clientpositive/join26.q     |     1 +
 ql/src/test/queries/clientpositive/join27.q     |     2 +-
 ql/src/test/queries/clientpositive/join30.q     |     2 +-
 ql/src/test/queries/clientpositive/join36.q     |     2 +-
 ql/src/test/queries/clientpositive/join37.q     |     1 +
 ql/src/test/queries/clientpositive/join38.q     |     1 +
 ql/src/test/queries/clientpositive/join39.q     |     1 +
 ql/src/test/queries/clientpositive/join40.q     |     1 +
 .../clientpositive/join_is_not_distinct_from.q  |    71 +
 .../test/queries/clientpositive/join_map_ppr.q  |     1 +
 .../queries/clientpositive/join_on_varchar.q    |     1 +
 .../test/queries/clientpositive/join_reorder.q  |     1 +
 .../test/queries/clientpositive/join_reorder2.q |     1 +
 .../test/queries/clientpositive/join_reorder3.q |     2 +-
 .../test/queries/clientpositive/join_reorder4.q |     2 +-
 .../clientpositive/lateral_view_onview.q        |     6 +
 ql/src/test/queries/clientpositive/llap_text.q  |     2 +-
 .../clientpositive/llap_vector_nohybridgrace.q  |    32 +
 ql/src/test/queries/clientpositive/mapjoin1.q   |     2 +
 .../queries/clientpositive/mapjoin_decimal.q    |    16 +-
 .../queries/clientpositive/mapjoin_distinct.q   |     1 +
 ql/src/test/queries/clientpositive/mergejoin.q  |     2 +
 .../clientpositive/metadata_only_queries.q      |     6 +-
 .../metadata_only_queries_with_filters.q        |     8 +-
 ql/src/test/queries/clientpositive/mm_all.q     |    22 +-
 .../test/queries/clientpositive/msck_repair_0.q |     7 +
 .../queries/clientpositive/named_column_join.q  |    52 +
 .../clientpositive/optimize_filter_literal.q    |    49 +
 .../test/queries/clientpositive/orc_file_dump.q |     6 +-
 .../queries/clientpositive/orc_llap_counters.q  |    10 +-
 .../queries/clientpositive/orc_llap_counters1.q |    10 +-
 .../test/queries/clientpositive/orc_ppd_basic.q |    10 +-
 .../clientpositive/orc_ppd_schema_evol_3a.q     |    10 +-
 .../clientpositive/orc_predicate_pushdown.q     |     4 +-
 .../clientpositive/outer_reference_windowed.q   |    80 +
 .../queries/clientpositive/parallel_colstats.q  |    32 +
 .../queries/clientpositive/parquet_decimal.q    |    12 +-
 .../clientpositive/parquet_int96_timestamp.q    |     2 +-
 .../clientpositive/parquet_ppd_multifiles.q     |     6 +-
 .../clientpositive/parquet_predicate_pushdown.q |     6 +-
 .../clientpositive/partitions_filter_default.q  |    14 +
 ql/src/test/queries/clientpositive/pcs.q        |     3 +-
 .../test/queries/clientpositive/perf/query9.q   |    50 +
 .../clientpositive/position_alias_test_1.q      |     5 +-
 .../queries/clientpositive/primitive_types.q    |     4 +-
 .../queries/clientpositive/quotedid_stats.q     |    11 +
 .../clientpositive/rename_partition_location.q  |    14 +
 ql/src/test/queries/clientpositive/row__id.q    |     4 +-
 .../clientpositive/schema_evol_orc_acid_part.q  |    15 +-
 .../schema_evol_orc_acid_part_update.q          |     3 +-
 .../clientpositive/schema_evol_orc_acid_table.q |     9 +-
 .../schema_evol_orc_acid_table_update.q         |     3 +-
 .../schema_evol_orc_acidvec_part.q              |    46 +-
 .../schema_evol_orc_acidvec_part_update.q       |     3 +-
 .../schema_evol_orc_acidvec_table.q             |    48 +-
 .../schema_evol_orc_acidvec_table_update.q      |     3 +-
 .../schema_evol_orc_nonvec_part.q               |    22 +-
 .../schema_evol_orc_nonvec_part_all_complex.q   |    10 +-
 .../schema_evol_orc_nonvec_part_all_primitive.q |    14 +-
 .../schema_evol_orc_nonvec_table.q              |    14 +-
 .../clientpositive/schema_evol_orc_vec_part.q   |     2 +-
 .../schema_evol_orc_vec_part_all_complex.q      |     2 +-
 .../schema_evol_orc_vec_part_all_primitive.q    |     2 +-
 .../clientpositive/schema_evol_orc_vec_table.q  |     2 +-
 .../schema_evol_text_nonvec_part.q              |    22 +-
 .../schema_evol_text_nonvec_part_all_complex.q  |    10 +-
 ...schema_evol_text_nonvec_part_all_primitive.q |    14 +-
 .../schema_evol_text_nonvec_table.q             |    14 +-
 .../clientpositive/schema_evol_text_vec_part.q  |     2 +-
 .../schema_evol_text_vec_part_all_complex.q     |     2 +-
 .../schema_evol_text_vec_part_all_primitive.q   |     2 +-
 .../clientpositive/schema_evol_text_vec_table.q |     2 +-
 .../schema_evol_text_vecrow_part.q              |     2 +-
 .../schema_evol_text_vecrow_part_all_complex.q  |     2 +-
 ...schema_evol_text_vecrow_part_all_primitive.q |     2 +-
 .../schema_evol_text_vecrow_table.q             |     2 +-
 .../clientpositive/select_column_pruning.q      |     4 +
 .../test/queries/clientpositive/semijoin_hint.q |   104 +
 ql/src/test/queries/clientpositive/skewjoin.q   |     2 +-
 .../test/queries/clientpositive/smb_mapjoin9.q  |     3 +-
 .../test/queries/clientpositive/smb_mapjoin_1.q |     1 +
 .../queries/clientpositive/smb_mapjoin_10.q     |     2 +-
 .../queries/clientpositive/smb_mapjoin_11.q     |     1 +
 .../queries/clientpositive/smb_mapjoin_12.q     |     4 +-
 .../queries/clientpositive/smb_mapjoin_13.q     |     2 +-
 .../queries/clientpositive/smb_mapjoin_16.q     |     2 +-
 .../test/queries/clientpositive/smb_mapjoin_2.q |     2 +-
 .../test/queries/clientpositive/smb_mapjoin_3.q |     1 +
 .../test/queries/clientpositive/smb_mapjoin_7.q |     2 +-
 .../clientpositive/sort_merge_join_desc_1.q     |     2 +-
 .../clientpositive/sort_merge_join_desc_2.q     |     1 +
 .../clientpositive/sort_merge_join_desc_3.q     |     2 +-
 .../clientpositive/sort_merge_join_desc_4.q     |     2 +-
 .../clientpositive/sort_merge_join_desc_5.q     |     1 +
 .../clientpositive/sort_merge_join_desc_6.q     |     2 +-
 .../clientpositive/sort_merge_join_desc_7.q     |     2 +-
 .../clientpositive/sort_merge_join_desc_8.q     |     2 +-
 .../clientpositive/spark_explainuser_1.q        |   671 +
 .../queries/clientpositive/subquery_multi.q     |    15 +
 .../queries/clientpositive/subquery_scalar.q    |     1 +
 .../queries/clientpositive/subquery_select.q    |   138 +-
 .../temp_table_windowing_expressions.q          |     2 +-
 .../test/queries/clientpositive/tez_smb_main.q  |    13 +-
 .../tez_vector_dynpart_hashjoin_1.q             |     1 +
 .../test/queries/clientpositive/tunable_ndv.q   |    64 +
 .../clientpositive/udaf_binarysetfunctions.q    |    58 +
 .../clientpositive/udaf_percentile_approx_23.q  |     1 +
 .../clientpositive/udf_character_length.q       |    29 +
 ql/src/test/queries/clientpositive/udf_isops.q  |    34 +
 .../test/queries/clientpositive/udf_likeall.q   |    57 +
 .../test/queries/clientpositive/udf_likeany.q   |    57 +
 ql/src/test/queries/clientpositive/udf_nullif.q |    11 +
 .../queries/clientpositive/udf_octet_length.q   |    21 +
 .../clientpositive/udf_round_2_auto_stats.q     |    16 +
 ql/src/test/queries/clientpositive/udf_trunc.q  |   146 +-
 .../queries/clientpositive/udf_width_bucket.q   |   204 +
 .../queries/clientpositive/unionDistinct_1.q    |     4 +-
 .../queries/clientpositive/updateAccessTime.q   |     5 +
 .../clientpositive/vector_binary_join_groupby.q |    22 +-
 .../clientpositive/vector_cast_constant.q       |     4 +-
 .../clientpositive/vector_complex_join.q        |     3 +-
 .../queries/clientpositive/vector_data_types.q  |    16 +-
 .../clientpositive/vector_decimal_10_0.q        |     8 +-
 .../clientpositive/vector_decimal_mapjoin.q     |    14 +-
 .../clientpositive/vector_decimal_precision.q   |    26 +-
 .../clientpositive/vector_decimal_round.q       |    31 +-
 .../clientpositive/vector_decimal_round_2.q     |    92 +-
 .../clientpositive/vector_groupby_cube1.q       |    55 +
 .../vector_groupby_grouping_id1.q               |    23 +
 .../vector_groupby_grouping_id2.q               |    65 +
 .../vector_groupby_grouping_id3.q               |    42 +
 .../vector_groupby_grouping_sets1.q             |    43 +
 .../vector_groupby_grouping_sets2.q             |    36 +
 .../vector_groupby_grouping_sets3.q             |    40 +
 .../vector_groupby_grouping_sets4.q             |    57 +
 .../vector_groupby_grouping_sets5.q             |    39 +
 .../vector_groupby_grouping_sets6.q             |    38 +
 .../vector_groupby_grouping_sets_grouping.q     |   135 +
 .../vector_groupby_grouping_sets_limit.q        |    44 +
 .../vector_groupby_grouping_window.q            |    21 +
 .../clientpositive/vector_groupby_mapjoin.q     |    14 +
 .../clientpositive/vector_groupby_reduce.q      |     2 +
 .../clientpositive/vector_groupby_rollup1.q     |    54 +
 .../queries/clientpositive/vector_order_null.q  |    56 +
 .../clientpositive/vector_ptf_part_simple.q     |   268 +
 .../clientpositive/vector_string_concat.q       |     4 +-
 .../vector_udf_character_length.q               |    31 +
 .../clientpositive/vector_udf_octet_length.q    |    23 +
 .../clientpositive/vectorization_limit.q        |    14 +-
 .../clientpositive/vectorized_bucketmapjoin1.q  |     2 +-
 .../queries/clientpositive/vectorized_case.q    |    18 +
 .../vectorized_dynamic_partition_pruning.q      |     3 +-
 .../vectorized_dynamic_semijoin_reduction.q     |     2 +
 .../vectorized_dynamic_semijoin_reduction2.q    |     4 +
 .../clientpositive/vectorized_parquet_types.q   |     5 +-
 .../queries/clientpositive/vectorized_ptf.q     |    46 +-
 ql/src/test/queries/clientpositive/windowing.q  |     2 +-
 .../queries/clientpositive/windowing_distinct.q |     8 +-
 .../clientpositive/windowing_expressions.q      |     2 +-
 .../windowing_multipartitioning.q               |     6 +-
 .../queries/clientpositive/windowing_navfn.q    |     6 +-
 .../queries/clientpositive/windowing_ntile.q    |     4 +-
 .../clientpositive/windowing_order_null.q       |     2 +-
 .../clientpositive/windowing_range_multiorder.q |     2 +-
 .../queries/clientpositive/windowing_rank.q     |    28 +-
 .../clientpositive/windowing_streaming.q        |     4 +-
 .../queries/clientpositive/windowing_udaf.q     |     4 +-
 .../clientpositive/windowing_windowspec.q       |     4 +-
 .../clientpositive/windowing_windowspec2.q      |     2 +-
 .../clientpositive/zero_rows_single_insert.q    |    17 +
 .../results/clientnegative/acid_overwrite.q.out |     2 +-
 .../clientnegative/alter_non_native.q.out       |     2 +-
 .../alter_view_as_select_with_partition.q.out   |     1 +
 .../clientnegative/alter_view_failure6.q.out    |     2 +-
 .../results/clientnegative/bad_exec_hooks.q.out |     2 +-
 .../clientnegative/char_pad_convert_fail0.q.out |     4 +-
 .../clientnegative/char_pad_convert_fail1.q.out |     4 +-
 .../clientnegative/char_pad_convert_fail2.q.out |     4 +-
 .../clientnegative/char_pad_convert_fail3.q.out |     4 +-
 ...tats_partlvl_invalid_values_autogather.q.out |    69 +
 .../clientnegative/decimal_precision.q.out      |     6 +-
 .../clientnegative/decimal_precision_1.q.out    |     6 +-
 .../distinct_windowing_failure1.q.out           |     4 +-
 .../distinct_windowing_failure2.q.out           |     4 +-
 .../drop_default_partition_filter.q.out         |    23 +
 .../test/results/clientnegative/external1.q.out |     2 +-
 .../clientnegative/input_part0_neg.q.out        |     2 +-
 .../insert_into_with_schema.q.out               |     2 +-
 .../insert_into_with_schema1.q.out              |     2 +-
 .../insert_into_with_schema2.q.out              |     2 +-
 .../results/clientnegative/msck_repair_4.q.out  |    22 +
 .../clientnegative/nvl_mismatch_type.q.out      |     4 +-
 .../ptf_negative_InvalidValueBoundary.q.out     |     3 +-
 .../spark/spark_job_max_tasks.q.out             |    77 +
 .../subquery_corr_grandparent.q.out             |     2 +-
 .../clientnegative/subquery_in_select.q.out     |     1 -
 .../subquery_scalar_corr_multi_rows.q.out       |     5 +
 .../subquery_scalar_multi_columns.q.out         |     4 +-
 .../subquery_scalar_multi_rows.q.out            |     3 +-
 .../subquery_select_aggregate.q.out             |     2 +-
 .../subquery_select_complex_expr.q.out          |     1 -
 .../subquery_select_no_source.q.out             |     1 +
 .../clientnegative/subquery_select_udf.q.out    |     1 -
 .../clientnegative/subquery_with_or_cond.q.out  |     1 -
 .../clientnegative/udf_likeall_wrong1.q.out     |     1 +
 .../clientnegative/udf_likeany_wrong1.q.out     |     1 +
 ql/src/test/results/clientnegative/union2.q.out |     2 +-
 .../clientnegative/wrong_column_type.q.out      |     2 +-
 .../clientpositive/acid_table_stats.q.out       |    16 +-
 .../clientpositive/add_part_multiple.q.out      |    16 +-
 ...lter_numbuckets_partitioned_table2_h23.q.out |     9 +
 ...alter_numbuckets_partitioned_table_h23.q.out |     8 +
 .../alter_partition_clusterby_sortby.q.out      |     1 +
 .../alter_partition_coltype.q.out               |     2 +-
 .../alter_table_add_partition.q.out             |     3 +
 .../alter_table_column_stats.q.out              |  2706 ++
 .../alter_table_invalidate_column_stats.q.out   |   932 -
 .../clientpositive/alter_table_serde2.q.out     |     2 +
 .../annotate_stats_deep_filters.q.out           |     4 +-
 .../clientpositive/autoColumnStats_3.q.out      |     1 +
 .../clientpositive/autoColumnStats_4.q.out      |     4 +-
 .../clientpositive/autoColumnStats_7.q.out      |     2 +-
 .../clientpositive/autoColumnStats_8.q.out      |     4 +-
 .../results/clientpositive/avro_decimal.q.out   |    28 +-
 .../clientpositive/avro_decimal_native.q.out    |    28 +-
 .../avro_schema_evolution_native.q.out          |     2 +
 .../results/clientpositive/ba_table_udfs.q.out  |     2 +-
 .../beeline/drop_with_concurrency.q.out         |    63 +-
 .../beeline/escape_comments.q.out               |   428 +-
 .../beeline/select_dummy_source.q.out           |   251 +
 .../clientpositive/beeline/smb_mapjoin_1.q.out  |   490 +
 .../clientpositive/beeline/smb_mapjoin_10.q.out |   107 +
 .../clientpositive/beeline/smb_mapjoin_11.q.out |  2161 +
 .../clientpositive/beeline/smb_mapjoin_12.q.out |   430 +
 .../clientpositive/beeline/smb_mapjoin_13.q.out |   388 +
 .../clientpositive/beeline/smb_mapjoin_16.q.out |    96 +
 .../clientpositive/beeline/smb_mapjoin_2.q.out  |   498 +
 .../clientpositive/beeline/smb_mapjoin_3.q.out  |   494 +
 .../clientpositive/beeline/smb_mapjoin_7.q.out  |  1268 +
 .../clientpositive/bucket_map_join_spark1.q.out |     8 +-
 .../clientpositive/bucket_map_join_spark2.q.out |     8 +-
 .../clientpositive/bucket_map_join_spark3.q.out |     8 +-
 .../results/clientpositive/bucketmapjoin5.q.out |    12 +-
 .../clientpositive/bucketmapjoin_negative.q.out |     2 +-
 .../bucketmapjoin_negative2.q.out               |     2 +-
 .../bucketsortoptimize_insert_3.q.out           |     4 +-
 ql/src/test/results/clientpositive/cast1.q.out  |     6 +-
 .../clientpositive/cbo_rp_auto_join1.q.out      |     4 +-
 .../clientpositive/cbo_rp_outer_join_ppr.q.out  |   297 +-
 ql/src/test/results/clientpositive/char_1.q.out |     8 +-
 .../clientpositive/char_pad_convert.q.out       |    20 +-
 .../columnStatsUpdateForStatsOptimizer_2.q.out  |     8 +-
 ...names_with_leading_and_trailing_spaces.q.out |    20 +
 .../column_pruner_multiple_children.q.out       |   189 +
 .../clientpositive/columnstats_infinity.q.out   |   295 +
 .../clientpositive/columnstats_partlvl.q.out    |     4 +-
 .../clientpositive/columnstats_partlvl_dp.q.out |    16 +-
 .../clientpositive/columnstats_tbllvl.q.out     |    16 +-
 .../test/results/clientpositive/comments.q.out  |    76 +-
 .../results/clientpositive/complex_alias.q.out  |     8 +-
 .../clientpositive/constant_prop_3.q.out        |     4 +-
 .../results/clientpositive/constprog2.q.out     |    31 +-
 .../clientpositive/correlated_join_keys.q.out   |   258 +
 .../clientpositive/correlationoptimizer13.q.out |     8 +-
 .../clientpositive/create_like_view.q.out       |     1 +
 .../clientpositive/create_or_replace_view.q.out |     4 +
 .../create_table_like_stats.q.out               |     2 +
 .../create_view_partitioned.q.out               |     3 +
 .../create_with_constraints.q.out               |    56 +-
 .../results/clientpositive/decimal_10_0.q.out   |     8 +-
 .../clientpositive/decimal_precision.q.out      |    56 +-
 .../results/clientpositive/decimal_udf.q.out    |    12 +-
 .../clientpositive/default_file_format.q.out    |    10 +
 .../results/clientpositive/deleteAnalyze.q.out  |    20 +-
 .../clientpositive/describe_syntax.q.out        |     2 +
 .../results/clientpositive/describe_table.q.out |     3 +
 .../display_colstats_tbllvl.q.out               |     8 +-
 .../clientpositive/distinct_windowing.q.out     |     4 +-
 .../distinct_windowing_no_cbo.q.out             |     8 +-
 .../drop_partitions_filter4.q.out               |    71 +
 .../results/clientpositive/druid_basic2.q.out   |   376 +-
 .../clientpositive/druid_intervals.q.out        |   138 +-
 .../clientpositive/druid_timeseries.q.out       |    48 +-
 .../results/clientpositive/druid_topn.q.out     |   328 +-
 .../clientpositive/dynamic_rdd_cache.q.out      |    56 +-
 .../encryption_auto_purge_tables.q.out          |   157 +
 .../encrypted/encryption_drop_partition.q.out   |    37 +-
 .../encrypted/encryption_drop_table.q.out       |    59 +-
 .../encryption_drop_table_in_encrypted_db.q.out |    53 +
 .../encrypted/encryption_move_tbl.q.out         |    50 +-
 .../clientpositive/escape_comments.q.out        |     1 +
 .../results/clientpositive/except_all.q.out     |    16 +-
 .../clientpositive/exim_hidden_files.q.out      |     1 +
 .../clientpositive/filter_cond_pushdown.q.out   |   124 +-
 .../clientpositive/filter_join_breaktask2.q.out |    46 +-
 .../clientpositive/fouter_join_ppr.q.out        |   594 +-
 .../clientpositive/fp_literal_arithmetic.q.out  |   338 +
 .../test/results/clientpositive/groupby12.q.out |     2 +-
 .../test/results/clientpositive/groupby5.q.out  |     2 +-
 .../clientpositive/groupby5_noskew.q.out        |     2 +-
 .../results/clientpositive/groupby7_map.q.out   |     4 +-
 .../groupby7_map_multi_single_reducer.q.out     |     4 +-
 .../clientpositive/groupby7_map_skew.q.out      |     4 +-
 .../clientpositive/groupby7_noskew.q.out        |     4 +-
 .../groupby7_noskew_multi_single_reducer.q.out  |     4 +-
 .../test/results/clientpositive/groupby8.q.out  |     8 +-
 .../results/clientpositive/groupby8_map.q.out   |     4 +-
 .../clientpositive/groupby8_map_skew.q.out      |     4 +-
 .../clientpositive/groupby8_noskew.q.out        |     4 +-
 .../test/results/clientpositive/groupby9.q.out  |    28 +-
 .../clientpositive/groupby_cube_multi_gby.q.out |     2 +-
 .../clientpositive/groupby_grouping_id1.q.out   |   120 +-
 .../clientpositive/groupby_grouping_id3.q.out   |   139 +
 .../clientpositive/groupby_grouping_sets1.q.out |   496 +-
 .../clientpositive/groupby_grouping_sets2.q.out |    62 +-
 .../clientpositive/groupby_grouping_sets3.q.out |    41 +-
 .../clientpositive/groupby_grouping_sets5.q.out |    36 +-
 .../clientpositive/groupby_grouping_sets6.q.out |     4 +-
 .../groupby_grouping_sets_grouping.q.out        |   470 +-
 .../groupby_grouping_sets_limit.q.out           |    34 +-
 .../clientpositive/groupby_join_pushdown.q.out  |    26 +-
 .../groupby_multi_single_reducer.q.out          |     2 +-
 .../clientpositive/groupby_position.q.out       |    34 +-
 .../clientpositive/groupby_sort_skew_1_23.q.out |     4 +-
 .../test/results/clientpositive/having2.q.out   |    86 +-
 .../clientpositive/index_auto_unused.q.out      |    38 +-
 .../clientpositive/index_auto_update.q.out      |     4 +-
 .../clientpositive/infer_bucket_sort.q.out      |     6 +-
 .../infer_bucket_sort_grouping_operators.q.out  |    30 +-
 .../infer_bucket_sort_map_operators.q.out       |    53 +-
 .../infer_bucket_sort_reducers_power_two.q.out  |     6 +-
 ql/src/test/results/clientpositive/input8.q.out |     4 +-
 ql/src/test/results/clientpositive/input9.q.out |     4 +-
 .../results/clientpositive/input_part10.q.out   |    24 +-
 .../clientpositive/inputwherefalse.q.out        |    55 +
 .../insert_values_orig_table_use_metadata.q.out |    18 +-
 ql/src/test/results/clientpositive/join25.q.out |   114 +-
 ql/src/test/results/clientpositive/join26.q.out |   418 +-
 ql/src/test/results/clientpositive/join27.q.out |   112 +-
 ql/src/test/results/clientpositive/join30.q.out |    66 +-
 ql/src/test/results/clientpositive/join36.q.out |   114 +-
 ql/src/test/results/clientpositive/join37.q.out |   114 +-
 ql/src/test/results/clientpositive/join38.q.out |   108 +-
 ql/src/test/results/clientpositive/join39.q.out |    86 +-
 ql/src/test/results/clientpositive/join40.q.out |   117 +-
 ql/src/test/results/clientpositive/join45.q.out |    64 +-
 .../clientpositive/join_alt_syntax.q.out        |   118 +-
 .../clientpositive/join_cond_pushdown_1.q.out   |   100 +-
 .../clientpositive/join_cond_pushdown_3.q.out   |   100 +-
 .../join_cond_pushdown_unqual1.q.out            |     2 +-
 .../join_cond_pushdown_unqual3.q.out            |     2 +-
 .../results/clientpositive/join_merging.q.out   |    72 +-
 .../clientpositive/join_on_varchar.q.out        |    46 +-
 .../results/clientpositive/join_reorder.q.out   |   106 +-
 .../results/clientpositive/join_reorder4.q.out  |   327 +-
 .../clientpositive/lateral_view_onview.q.out    |   205 +-
 .../limit_pushdown_negative.q.out               |     4 +-
 .../llap/acid_bucket_pruning.q.out              |     6 +-
 .../clientpositive/llap/autoColumnStats_1.q.out |     1 +
 .../clientpositive/llap/autoColumnStats_2.q.out |     5 +-
 .../llap/auto_smb_mapjoin_14.q.out              |     4 +-
 .../llap/auto_sortmerge_join_9.q.out            |     4 +-
 .../clientpositive/llap/bucket_groupby.q.out    |   426 +-
 .../clientpositive/llap/bucketmapjoin1.q.out    |   212 +-
 .../clientpositive/llap/bucketmapjoin2.q.out    |   156 +-
 .../clientpositive/llap/bucketmapjoin3.q.out    |   104 +-
 .../clientpositive/llap/bucketmapjoin4.q.out    |   104 +-
 .../clientpositive/llap/cbo_rp_lineage2.q.out   |    58 +-
 .../clientpositive/llap/cbo_rp_views.q.out      |     4 +-
 .../llap/cbo_rp_windowing_2.q.out               |    54 +-
 .../results/clientpositive/llap/cbo_views.q.out |     2 +-
 .../results/clientpositive/llap/cluster.q.out   |   688 +-
 ...names_with_leading_and_trailing_spaces.q.out |    20 +
 .../llap/column_table_stats.q.out               |    18 +-
 .../llap/column_table_stats_orc.q.out           |    12 +-
 .../llap/constprog_semijoin.q.out               |    16 +-
 .../llap/correlationoptimizer1.q.out            |    32 +-
 .../results/clientpositive/llap/cte_1.q.out     | 37900 ++++++++---------
 .../clientpositive/llap/deleteAnalyze.q.out     |    16 +-
 .../llap/dynamic_partition_pruning.q.out        |    39 +-
 .../llap/dynamic_partition_pruning_2.q.out      |    72 +-
 .../llap/dynamic_semijoin_reduction.q.out       |  1187 +-
 .../llap/dynamic_semijoin_reduction_2.q.out     |     8 +-
 .../llap/dynamic_semijoin_reduction_3.q.out     |   268 +-
 .../llap/dynamic_semijoin_user_level.q.out      |  1486 +
 .../llap/dynpart_sort_opt_vectorization.q.out   |    50 +-
 .../llap/dynpart_sort_optimization.q.out        |     4 +-
 .../llap/dynpart_sort_optimization2.q.out       |     4 +-
 .../clientpositive/llap/except_distinct.q.out   |     2 +-
 .../clientpositive/llap/explainuser_1.q.out     |  1356 +-
 .../clientpositive/llap/explainuser_2.q.out     |  2302 +-
 .../clientpositive/llap/explainuser_4.q.out     |     6 +-
 .../llap/filter_join_breaktask2.q.out           |    46 +-
 .../llap/groupby_grouping_id2.q.out             |     9 +
 .../llap/hybridgrace_hashjoin_2.q.out           |     4 +-
 .../clientpositive/llap/intersect_all.q.out     |  1470 +-
 .../llap/intersect_distinct.q.out               |   924 +-
 .../clientpositive/llap/is_distinct_from.q.out  |   335 +
 .../clientpositive/llap/jdbc_handler.q.out      |    52 +-
 .../clientpositive/llap/join_filters.q.out      |     8 +-
 .../llap/join_is_not_distinct_from.q.out        |  1673 +
 .../clientpositive/llap/join_nulls.q.out        |     2 +-
 .../clientpositive/llap/lateral_view.q.out      |    12 +-
 .../clientpositive/llap/limit_pushdown.q.out    |     4 +-
 .../clientpositive/llap/limit_pushdown3.q.out   |     4 +-
 .../results/clientpositive/llap/lineage2.q.out  |    58 +-
 .../results/clientpositive/llap/lineage3.q.out  |     4 +-
 .../clientpositive/llap/llap_stats.q.out        |     4 +-
 .../llap/llap_vector_nohybridgrace.q.out        |   356 +
 .../clientpositive/llap/mapjoin_decimal.q.out   |    32 +-
 .../results/clientpositive/llap/mergejoin.q.out |   470 +-
 .../llap/metadata_only_queries.q.out            |    12 +-
 .../metadata_only_queries_with_filters.q.out    |    16 +-
 .../results/clientpositive/llap/mm_all.q.out    |   140 +-
 .../results/clientpositive/llap/mm_all2.q.out   |   495 -
 .../clientpositive/llap/multiMapJoin1.q.out     |     4 +-
 .../clientpositive/llap/multi_column_in.q.out   |     2 +-
 .../llap/multi_count_distinct_null.q.out        |    24 +-
 .../llap/multi_insert_lateral_view.q.out        |   316 +-
 .../llap/offset_limit_ppd_optimizer.q.out       |     4 +-
 .../clientpositive/llap/orc_analyze.q.out       |    32 +-
 .../clientpositive/llap/orc_create.q.out        |     4 +
 .../clientpositive/llap/orc_llap_counters.q.out |    20 +-
 .../llap/orc_llap_counters1.q.out               |    20 +-
 .../clientpositive/llap/orc_merge10.q.out       |     4 +-
 .../clientpositive/llap/orc_merge11.q.out       |   110 +-
 .../clientpositive/llap/orc_merge12.q.out       |     4 +-
 .../clientpositive/llap/orc_ppd_basic.q.out     |    20 +-
 .../clientpositive/llap/orc_ppd_decimal.q.out   |     4 +-
 .../llap/orc_ppd_schema_evol_3a.q.out           |    20 +-
 .../llap/orc_predicate_pushdown.q.out           |    14 +-
 .../clientpositive/llap/parallel_colstats.q.out |  1516 +
 .../llap/parquet_predicate_pushdown.q.out       |    14 +-
 .../clientpositive/llap/parquet_types.q.out     |     2 +-
 .../llap/partition_multilevels.q.out            |   244 +-
 .../test/results/clientpositive/llap/ptf.q.out  |    12 +-
 .../llap/reduce_deduplicate_extended.q.out      |    78 +-
 .../llap/schema_evol_orc_acid_part.q.out        |    91 +-
 .../llap/schema_evol_orc_acid_table.q.out       |    11 +
 .../llap/schema_evol_orc_acidvec_part.q.out     |   700 +-
 .../llap/schema_evol_orc_acidvec_table.q.out    |   686 +
 .../llap/schema_evol_orc_nonvec_part.q.out      |   450 +-
 ...chema_evol_orc_nonvec_part_all_complex.q.out |   150 +-
 ...ema_evol_orc_nonvec_part_all_primitive.q.out |   250 +-
 .../llap/schema_evol_orc_nonvec_table.q.out     |   250 +-
 .../clientpositive/llap/schema_evol_stats.q.out |     2 +
 .../llap/schema_evol_text_nonvec_part.q.out     |   450 +-
 ...hema_evol_text_nonvec_part_all_complex.q.out |   150 +-
 ...ma_evol_text_nonvec_part_all_primitive.q.out |   250 +-
 .../llap/schema_evol_text_nonvec_table.q.out    |   250 +-
 .../clientpositive/llap/semijoin_hint.q.out     |  2934 ++
 .../results/clientpositive/llap/skewjoin.q.out  |    98 +-
 .../clientpositive/llap/skiphf_aggr.q.out       |     4 +-
 .../clientpositive/llap/smb_mapjoin_14.q.out    |   207 +-
 .../clientpositive/llap/smb_mapjoin_15.q.out    |   276 +-
 .../clientpositive/llap/smb_mapjoin_17.q.out    |    98 +-
 .../clientpositive/llap/smb_mapjoin_4.q.out     |   889 +-
 .../clientpositive/llap/smb_mapjoin_5.q.out     |   889 +-
 .../clientpositive/llap/smb_mapjoin_6.q.out     |   274 +-
 .../results/clientpositive/llap/stats11.q.out   |   104 +-
 .../clientpositive/llap/stats_noscan_1.q.out    |     1 +
 .../clientpositive/llap/subquery_exists.q.out   |    41 +-
 .../clientpositive/llap/subquery_in.q.out       |   630 +-
 .../clientpositive/llap/subquery_multi.q.out    |  1490 +-
 .../clientpositive/llap/subquery_notin.q.out    |   692 +-
 .../clientpositive/llap/subquery_scalar.q.out   |  2780 +-
 .../clientpositive/llap/subquery_select.q.out   |  5387 ++-
 .../llap/table_access_keys_stats.q.out          |     4 +-
 .../llap/tez_dynpart_hashjoin_1.q.out           |     2 +-
 .../clientpositive/llap/tez_join_hash.q.out     |    10 +-
 .../clientpositive/llap/tez_smb_main.q.out      |   426 +
 .../clientpositive/llap/tez_union2.q.out        |   924 +-
 .../llap/tez_union_multiinsert.q.out            |   182 +-
 .../llap/tez_vector_dynpart_hashjoin_1.q.out    |     4 +-
 .../llap/tez_vector_dynpart_hashjoin_2.q.out    |     8 +-
 .../clientpositive/llap/unionDistinct_1.q.out   |  4186 +-
 .../clientpositive/llap/union_fast_stats.q.out  |    12 +-
 .../llap/vector_adaptor_usage_mode.q.out        |     2 +-
 .../llap/vector_aggregate_without_gby.q.out     |    12 +-
 .../llap/vector_auto_smb_mapjoin_14.q.out       |    23 +-
 .../llap/vector_between_columns.q.out           |    26 +-
 .../clientpositive/llap/vector_between_in.q.out |   102 +-
 .../llap/vector_binary_join_groupby.q.out       |   116 +-
 .../clientpositive/llap/vector_bround.q.out     |     4 +-
 .../clientpositive/llap/vector_bucket.q.out     |    25 +-
 .../llap/vector_cast_constant.q.out             |     8 +-
 .../clientpositive/llap/vector_char_2.q.out     |    36 +-
 .../llap/vector_char_mapjoin1.q.out             |    33 +-
 .../llap/vector_char_simple.q.out               |    13 +-
 .../clientpositive/llap/vector_coalesce.q.out   |    45 +-
 .../clientpositive/llap/vector_coalesce_2.q.out |    10 +-
 .../llap/vector_complex_all.q.out               |     6 +-
 .../clientpositive/llap/vector_count.q.out      |    16 +-
 .../llap/vector_count_distinct.q.out            |     9 +-
 .../clientpositive/llap/vector_data_types.q.out |    41 +-
 .../llap/vector_decimal_10_0.q.out              |    12 +-
 .../llap/vector_decimal_aggregate.q.out         |    14 +-
 .../llap/vector_decimal_expressions.q.out       |     9 +-
 .../llap/vector_decimal_mapjoin.q.out           |    30 +-
 .../llap/vector_decimal_precision.q.out         |    52 +-
 .../llap/vector_decimal_round.q.out             |   123 +-
 .../llap/vector_decimal_round_2.q.out           |   210 +-
 .../llap/vector_decimal_udf.q.out               |    44 +-
 .../clientpositive/llap/vector_distinct_2.q.out |     2 +-
 .../llap/vector_empty_where.q.out               |    36 +-
 .../clientpositive/llap/vector_groupby4.q.out   |    11 +-
 .../clientpositive/llap/vector_groupby6.q.out   |    11 +-
 .../clientpositive/llap/vector_groupby_3.q.out  |     8 +-
 .../llap/vector_groupby_cube1.q.out             |   773 +
 .../llap/vector_groupby_grouping_id1.q.out      |   179 +
 .../llap/vector_groupby_grouping_id2.q.out      |   359 +
 .../llap/vector_groupby_grouping_id3.q.out      |   370 +
 .../llap/vector_groupby_grouping_sets1.q.out    |   668 +
 .../llap/vector_groupby_grouping_sets2.q.out    |   469 +
 .../llap/vector_groupby_grouping_sets3.q.out    |   314 +
 .../llap/vector_groupby_grouping_sets4.q.out    |   554 +
 .../llap/vector_groupby_grouping_sets5.q.out    |   371 +
 .../llap/vector_groupby_grouping_sets6.q.out    |   192 +
 .../vector_groupby_grouping_sets_grouping.q.out |  1224 +
 .../vector_groupby_grouping_sets_limit.q.out    |   650 +
 .../llap/vector_groupby_grouping_window.q.out   |   157 +
 .../llap/vector_groupby_mapjoin.q.out           |   142 +-
 .../llap/vector_groupby_reduce.q.out            |    83 +-
 .../llap/vector_groupby_rollup1.q.out           |   610 +
 .../llap/vector_grouping_sets.q.out             |    77 +-
 .../clientpositive/llap/vector_if_expr.q.out    |     9 +-
 .../llap/vector_include_no_sel.q.out            |     8 +-
 .../clientpositive/llap/vector_inner_join.q.out |    18 +-
 .../clientpositive/llap/vector_interval_1.q.out |    72 +-
 .../clientpositive/llap/vector_interval_2.q.out |   104 +-
 .../llap/vector_interval_arithmetic.q.out       |    66 +-
 .../llap/vector_interval_mapjoin.q.out          |    10 +-
 .../clientpositive/llap/vector_join30.q.out     |   251 +-
 .../llap/vector_join_part_col_char.q.out        |    18 +-
 .../llap/vector_left_outer_join2.q.out          |     8 +-
 .../llap/vector_leftsemi_mapjoin.q.out          |   838 +-
 .../llap/vector_mapjoin_reduce.q.out            |   167 +-
 .../llap/vector_non_constant_in_expr.q.out      |     4 +-
 .../llap/vector_non_string_partition.q.out      |   142 +-
 .../llap/vector_nullsafe_join.q.out             |    36 +-
 .../llap/vector_number_compare_projection.q.out |    18 +-
 .../clientpositive/llap/vector_order_null.q.out |  1360 +
 .../clientpositive/llap/vector_orderby_5.q.out  |    15 +-
 .../llap/vector_outer_join0.q.out               |     4 +-
 .../llap/vector_outer_join1.q.out               |    17 +-
 .../llap/vector_outer_join2.q.out               |    13 +-
 .../llap/vector_partition_diff_num_cols.q.out   |    35 +-
 .../llap/vector_partitioned_date_time.q.out     |   112 +-
 .../llap/vector_ptf_part_simple.q.out           |  3032 ++
 .../clientpositive/llap/vector_reduce1.q.out    |     9 +-
 .../clientpositive/llap/vector_reduce2.q.out    |     9 +-
 .../clientpositive/llap/vector_reduce3.q.out    |     9 +-
 .../llap/vector_reduce_groupby_decimal.q.out    |    14 +-
 .../llap/vector_string_concat.q.out             |    22 +-
 .../llap/vector_string_decimal.q.out            |    24 +-
 .../llap/vector_tablesample_rows.q.out          |     7 +-
 .../llap/vector_udf_character_length.q.out      |   287 +
 .../llap/vector_udf_octet_length.q.out          |   222 +
 .../llap/vector_varchar_mapjoin1.q.out          |     6 +-
 .../llap/vector_varchar_simple.q.out            |    13 +-
 .../llap/vector_when_case_null.q.out            |     2 +-
 .../clientpositive/llap/vectorization_0.q.out   |   100 +-
 .../clientpositive/llap/vectorization_13.q.out  |    12 +-
 .../clientpositive/llap/vectorization_15.q.out  |    10 +-
 .../clientpositive/llap/vectorization_17.q.out  |     2 +-
 .../clientpositive/llap/vectorization_7.q.out   |    18 +-
 .../clientpositive/llap/vectorization_8.q.out   |    18 +-
 .../llap/vectorization_div0.q.out               |   412 +-
 .../llap/vectorization_limit.q.out              |   714 +-
 .../llap/vectorization_offset_limit.q.out       |     9 +-
 .../llap/vectorization_part_project.q.out       |     2 +-
 .../llap/vectorization_short_regress.q.out      |   152 +-
 .../llap/vectorized_bucketmapjoin1.q.out        |    29 +-
 .../clientpositive/llap/vectorized_case.q.out   |   254 +
 .../llap/vectorized_date_funcs.q.out            |    14 +-
 .../llap/vectorized_distinct_gby.q.out          |     2 +-
 .../vectorized_dynamic_partition_pruning.q.out  |   518 +-
 .../vectorized_dynamic_semijoin_reduction.q.out |   246 +-
 ...vectorized_dynamic_semijoin_reduction2.q.out |    32 +-
 .../clientpositive/llap/vectorized_join46.q.out |    58 +-
 .../llap/vectorized_mapjoin.q.out               |     2 +-
 .../llap/vectorized_mapjoin2.q.out              |     9 +-
 .../llap/vectorized_parquet.q.out               |   168 +-
 .../llap/vectorized_parquet_types.q.out         |   189 +-
 .../clientpositive/llap/vectorized_ptf.q.out    |  2552 +-
 .../llap/vectorized_shufflejoin.q.out           |     4 +-
 .../llap/vectorized_timestamp.q.out             |    22 +-
 .../llap/vectorized_timestamp_funcs.q.out       |    43 +-
 .../results/clientpositive/llap/windowing.q.out |    54 +-
 .../clientpositive/llap/windowing_gby.q.out     |    42 +-
 .../llap/windowing_windowspec2.q.out            |     4 +-
 .../test/results/clientpositive/llap_text.q.out |     4 +-
 .../clientpositive/louter_join_ppr.q.out        |   331 +-
 .../test/results/clientpositive/mapjoin1.q.out  |   340 +-
 .../clientpositive/mapjoin_distinct.q.out       |   256 +-
 .../results/clientpositive/mapjoin_hook.q.out   |     4 +-
 .../test/results/clientpositive/mergejoin.q.out |    23 +-
 .../results/clientpositive/mergejoins.q.out     |     2 +-
 .../clientpositive/metadata_only_queries.q.out  |    12 +-
 .../metadata_only_queries_with_filters.q.out    |    16 +-
 ql/src/test/results/clientpositive/mm_all.q.out |   100 -
 .../results/clientpositive/msck_repair_0.q.out  |     8 +
 .../clientpositive/multi_insert_gby3.q.out      |     6 +-
 .../clientpositive/multi_insert_mixed.q.out     |     6 +-
 .../clientpositive/multigroupby_singlemr.q.out  |     4 +-
 .../clientpositive/named_column_join.q.out      |   482 +
 .../clientpositive/nested_column_pruning.q.out  |     8 +-
 .../results/clientpositive/null_column.q.out    |     4 +-
 .../optimize_filter_literal.q.out               |   147 +
 .../results/clientpositive/orc_file_dump.q.out  |   120 +-
 .../results/clientpositive/orc_merge10.q.out    |     4 +-
 .../results/clientpositive/orc_merge11.q.out    |   110 +-
 .../results/clientpositive/orc_merge12.q.out    |     4 +-
 .../results/clientpositive/outer_join_ppr.q.out |   289 +-
 .../outer_reference_windowed.q.out              |   847 +
 .../clientpositive/parallel_colstats.q.out      |  1529 +
 .../clientpositive/parquet_decimal.q.out        |    24 +-
 .../parquet_mixed_partition_formats.q.out       |     1 +
 .../clientpositive/parquet_partitioned.q.out    |     1 +
 .../clientpositive/parquet_ppd_multifiles.q.out |    12 +-
 .../results/clientpositive/parquet_serde.q.out  |     1 +
 ..._non_dictionary_encoding_vectorization.q.out |     4 +-
 .../parquet_types_vectorization.q.out           |     4 +-
 .../partitions_filter_default.q.out             |    67 +
 ql/src/test/results/clientpositive/pcs.q.out    |   150 +-
 .../results/clientpositive/perf/query1.q.out    |   260 +-
 .../results/clientpositive/perf/query12.q.out   |     2 +-
 .../results/clientpositive/perf/query13.q.out   |   186 +-
 .../results/clientpositive/perf/query14.q.out   |  2004 +-
 .../results/clientpositive/perf/query15.q.out   |   120 +-
 .../results/clientpositive/perf/query16.q.out   |    60 +-
 .../results/clientpositive/perf/query17.q.out   |   214 +-
 .../results/clientpositive/perf/query18.q.out   |     2 +-
 .../results/clientpositive/perf/query19.q.out   |   190 +-
 .../results/clientpositive/perf/query20.q.out   |    70 +-
 .../results/clientpositive/perf/query21.q.out   |     2 +-
 .../results/clientpositive/perf/query22.q.out   |    98 +-
 .../results/clientpositive/perf/query23.q.out   |   832 +-
 .../results/clientpositive/perf/query25.q.out   |   212 +-
 .../results/clientpositive/perf/query26.q.out   |   126 +-
 .../results/clientpositive/perf/query29.q.out   |   212 +-
 .../results/clientpositive/perf/query3.q.out    |    70 +-
 .../results/clientpositive/perf/query30.q.out   |   353 +-
 .../results/clientpositive/perf/query31.q.out   |   606 +-
 .../results/clientpositive/perf/query32.q.out   |   156 +-
 .../results/clientpositive/perf/query36.q.out   |     4 +-
 .../results/clientpositive/perf/query37.q.out   |     2 +-
 .../results/clientpositive/perf/query38.q.out   |   210 +-
 .../results/clientpositive/perf/query39.q.out   |   196 +-
 .../results/clientpositive/perf/query40.q.out   |     2 +-
 .../results/clientpositive/perf/query42.q.out   |    70 +-
 .../results/clientpositive/perf/query46.q.out   |   126 +-
 .../results/clientpositive/perf/query48.q.out   |   158 +-
 .../results/clientpositive/perf/query5.q.out    |   302 +-
 .../results/clientpositive/perf/query51.q.out   |    84 +-
 .../results/clientpositive/perf/query52.q.out   |    70 +-
 .../results/clientpositive/perf/query54.q.out   |   244 +-
 .../results/clientpositive/perf/query55.q.out   |    70 +-
 .../results/clientpositive/perf/query58.q.out   |   472 +-
 .../results/clientpositive/perf/query6.q.out    |   351 +-
 .../results/clientpositive/perf/query64.q.out   |  1218 +-
 .../results/clientpositive/perf/query65.q.out   |   202 +-
 .../results/clientpositive/perf/query66.q.out   |     2 +-
 .../results/clientpositive/perf/query67.q.out   |     2 +-
 .../results/clientpositive/perf/query68.q.out   |   126 +-
 .../results/clientpositive/perf/query69.q.out   |   204 +-
 .../results/clientpositive/perf/query7.q.out    |   126 +-
 .../results/clientpositive/perf/query70.q.out   |   154 +-
 .../results/clientpositive/perf/query71.q.out   |   202 +-
 .../results/clientpositive/perf/query72.q.out   |   334 +-
 .../results/clientpositive/perf/query75.q.out   |    14 +-
 .../results/clientpositive/perf/query79.q.out   |    98 +-
 .../results/clientpositive/perf/query8.q.out    |   222 +-
 .../results/clientpositive/perf/query80.q.out   |     6 +-
 .../results/clientpositive/perf/query81.q.out   |   355 +-
 .../results/clientpositive/perf/query82.q.out   |     2 +-
 .../results/clientpositive/perf/query83.q.out   |   462 +-
 .../results/clientpositive/perf/query85.q.out   |   266 +-
 .../results/clientpositive/perf/query86.q.out   |     4 +-
 .../results/clientpositive/perf/query87.q.out   |   210 +-
 .../results/clientpositive/perf/query88.q.out   |   224 +-
 .../results/clientpositive/perf/query89.q.out   |    98 +-
 .../results/clientpositive/perf/query9.q.out    |   829 +
 .../results/clientpositive/perf/query91.q.out   |   178 +-
 .../results/clientpositive/perf/query92.q.out   |    42 +-
 .../results/clientpositive/perf/query97.q.out   |    42 +-
 .../results/clientpositive/perf/query98.q.out   |    70 +-
 .../clientpositive/position_alias_test_1.q.out  |   113 +-
 .../clientpositive/ppd_constant_expr.q.out      |     8 +-
 .../test/results/clientpositive/ppd_gby.q.out   |    48 +-
 .../test/results/clientpositive/ppd_gby2.q.out  |    48 +-
 .../test/results/clientpositive/ppd_join2.q.out |   108 +-
 .../test/results/clientpositive/ppd_join3.q.out |   170 +-
 .../clientpositive/ppd_outer_join1.q.out        |    74 +-
 .../results/clientpositive/ppd_windowing1.q.out |    36 +-
 .../clientpositive/primitive_types.q.out        |    10 +-
 .../results/clientpositive/ptfgroupbyjoin.q.out |    40 +-
 .../results/clientpositive/quotedid_stats.q.out |    86 +
 .../reduce_deduplicate_extended2.q.out          |   167 +-
 .../clientpositive/remove_exprs_stats.q.out     |     6 +-
 .../rename_partition_location.q.out             |    23 +
 .../clientpositive/router_join_ppr.q.out        |   319 +-
 .../test/results/clientpositive/row__id.q.out   |    34 +-
 .../clientpositive/select_column_pruning.q.out  |   141 +
 .../test/results/clientpositive/semijoin5.q.out |     2 +-
 .../results/clientpositive/show_functions.q.out |    27 +
 .../test/results/clientpositive/skewjoin.q.out  |   198 +-
 .../results/clientpositive/smb_mapjoin_20.q.out |     6 +-
 .../spark/add_part_multiple.q.out               |    16 +-
 .../spark/auto_smb_mapjoin_14.q.out             |     4 +-
 .../spark/auto_sortmerge_join_9.q.out           |     4 +-
 .../spark/avro_decimal_native.q.out             |    28 +-
 .../spark/bucket_map_join_spark1.q.out          |     8 +-
 .../spark/bucket_map_join_spark2.q.out          |     8 +-
 .../spark/bucket_map_join_spark3.q.out          |     8 +-
 .../clientpositive/spark/bucketmapjoin1.q.out   |   664 +-
 .../clientpositive/spark/bucketmapjoin2.q.out   |   624 +-
 .../clientpositive/spark/bucketmapjoin3.q.out   |   376 +-
 .../clientpositive/spark/bucketmapjoin4.q.out   |   326 +-
 .../clientpositive/spark/bucketmapjoin5.q.out   |    12 +-
 .../spark/bucketmapjoin_negative.q.out          |     2 +-
 .../spark/bucketmapjoin_negative2.q.out         |     2 +-
 .../spark/constprog_semijoin.q.out              |    16 +-
 .../spark/dynamic_rdd_cache.q.out               |    52 +-
 .../spark/filter_join_breaktask2.q.out          |    46 +-
 .../results/clientpositive/spark/groupby5.q.out |     2 +-
 .../clientpositive/spark/groupby5_noskew.q.out  |     2 +-
 .../clientpositive/spark/groupby7_map.q.out     |     4 +-
 .../groupby7_map_multi_single_reducer.q.out     |     4 +-
 .../spark/groupby7_map_skew.q.out               |     4 +-
 .../clientpositive/spark/groupby7_noskew.q.out  |     4 +-
 .../groupby7_noskew_multi_single_reducer.q.out  |     4 +-
 .../results/clientpositive/spark/groupby8.q.out |     8 +-
 .../clientpositive/spark/groupby8_map.q.out     |     4 +-
 .../spark/groupby8_map_skew.q.out               |     4 +-
 .../clientpositive/spark/groupby8_noskew.q.out  |     4 +-
 .../results/clientpositive/spark/groupby9.q.out |    28 +-
 .../spark/groupby_grouping_id2.q.out            |     9 +
 .../clientpositive/spark/groupby_position.q.out |    30 +-
 .../spark/infer_bucket_sort_map_operators.q.out |    54 +-
 .../results/clientpositive/spark/join25.q.out   |    56 +-
 .../results/clientpositive/spark/join26.q.out   |   192 +-
 .../results/clientpositive/spark/join27.q.out   |    54 +-
 .../results/clientpositive/spark/join30.q.out   |    56 +-
 .../results/clientpositive/spark/join36.q.out   |    64 +-
 .../results/clientpositive/spark/join37.q.out   |    56 +-
 .../results/clientpositive/spark/join38.q.out   |   106 +-
 .../results/clientpositive/spark/join39.q.out   |    32 +-
 .../clientpositive/spark/join_alt_syntax.q.out  |    98 +-
 .../spark/join_cond_pushdown_1.q.out            |    86 +-
 .../spark/join_cond_pushdown_3.q.out            |    86 +-
 .../spark/join_cond_pushdown_unqual1.q.out      |     2 +-
 .../spark/join_cond_pushdown_unqual3.q.out      |     2 +-
 .../clientpositive/spark/join_merging.q.out     |    80 +-
 .../clientpositive/spark/join_reorder.q.out     |   106 +-
 .../clientpositive/spark/join_reorder4.q.out    |   339 +-
 .../clientpositive/spark/limit_pushdown.q.out   |     4 +-
 .../clientpositive/spark/louter_join_ppr.q.out  |   331 +-
 .../results/clientpositive/spark/mapjoin1.q.out |   284 +-
 .../clientpositive/spark/mapjoin_decimal.q.out  |    32 +-
 .../clientpositive/spark/mapjoin_distinct.q.out |   192 +-
 .../clientpositive/spark/mergejoins.q.out       |     2 +-
 .../spark/metadata_only_queries.q.out           |    12 +-
 .../metadata_only_queries_with_filters.q.out    |    16 +-
 .../spark/multi_insert_gby3.q.out               |     4 +-
 .../spark/multi_insert_lateral_view.q.out       |   316 +-
 .../spark/multi_insert_mixed.q.out              |     4 +-
 .../spark/multigroupby_singlemr.q.out           |     4 +-
 .../clientpositive/spark/outer_join_ppr.q.out   |   289 +-
 .../clientpositive/spark/ppd_join2.q.out        |   160 +-
 .../clientpositive/spark/ppd_join3.q.out        |   110 +-
 .../clientpositive/spark/ppd_outer_join1.q.out  |    74 +-
 .../test/results/clientpositive/spark/ptf.q.out |    12 +-
 .../clientpositive/spark/router_join_ppr.q.out  |   319 +-
 .../results/clientpositive/spark/skewjoin.q.out |   198 +-
 .../clientpositive/spark/smb_mapjoin_14.q.out   |   822 +-
 .../clientpositive/spark/smb_mapjoin_15.q.out   |   566 +-
 .../clientpositive/spark/smb_mapjoin_17.q.out   |   168 +-
 .../clientpositive/spark/smb_mapjoin_20.q.out   |     6 +-
 .../clientpositive/spark/smb_mapjoin_4.q.out    |  1093 +-
 .../clientpositive/spark/smb_mapjoin_5.q.out    |  1093 +-
 .../clientpositive/spark/smb_mapjoin_6.q.out    |   360 +-
 .../spark/spark_explainuser_1.q.out             |  5921 +++
 .../results/clientpositive/spark/stats10.q.out  |     1 +
 .../results/clientpositive/spark/stats12.q.out  |     1 +
 .../results/clientpositive/spark/stats13.q.out  |     2 +
 .../results/clientpositive/spark/stats14.q.out  |     2 +
 .../results/clientpositive/spark/stats15.q.out  |     2 +
 .../results/clientpositive/spark/stats2.q.out   |     2 +
 .../results/clientpositive/spark/stats3.q.out   |     1 +
 .../results/clientpositive/spark/stats6.q.out   |     1 +
 .../results/clientpositive/spark/stats7.q.out   |     1 +
 .../results/clientpositive/spark/stats8.q.out   |     2 +
 .../clientpositive/spark/stats_noscan_1.q.out   |     1 +
 .../clientpositive/spark/subquery_exists.q.out  |    37 +-
 .../clientpositive/spark/subquery_in.q.out      |   623 +-
 .../spark/table_access_keys_stats.q.out         |     4 +-
 .../results/clientpositive/spark/union17.q.out  |    40 +-
 .../results/clientpositive/spark/union19.q.out  |    20 +-
 .../clientpositive/spark/union_remove_15.q.out  |     1 +
 .../clientpositive/spark/union_remove_16.q.out  |     1 +
 .../clientpositive/spark/union_remove_17.q.out  |     1 +
 .../clientpositive/spark/union_remove_18.q.out  |     1 +
 .../clientpositive/spark/union_remove_19.q.out  |     4 +-
 .../spark/vector_between_in.q.out               |   122 +-
 .../spark/vector_cast_constant.q.out            |     8 +-
 .../spark/vector_count_distinct.q.out           |    14 +-
 .../spark/vector_data_types.q.out               |    41 +-
 .../spark/vector_decimal_aggregate.q.out        |    19 +-
 .../spark/vector_decimal_mapjoin.q.out          |    28 +-
 .../spark/vector_distinct_2.q.out               |     7 +-
 .../clientpositive/spark/vector_groupby_3.q.out |    13 +-
 .../spark/vector_mapjoin_reduce.q.out           |   157 +-
 .../clientpositive/spark/vector_orderby_5.q.out |    20 +-
 .../spark/vector_outer_join1.q.out              |    49 +-
 .../spark/vector_outer_join2.q.out              |    25 +-
 .../spark/vector_string_concat.q.out            |    22 +-
 .../clientpositive/spark/vectorization_0.q.out  |    99 +-
 .../clientpositive/spark/vectorization_13.q.out |    12 +-
 .../clientpositive/spark/vectorization_15.q.out |    10 +-
 .../clientpositive/spark/vectorization_17.q.out |     2 +-
 .../clientpositive/spark/vectorization_7.q.out  |    18 +-
 .../clientpositive/spark/vectorization_8.q.out  |    18 +-
 .../spark/vectorization_div0.q.out              |    18 +-
 .../spark/vectorization_part_project.q.out      |     2 +-
 .../spark/vectorization_short_regress.q.out     |   152 +-
 .../clientpositive/spark/vectorized_case.q.out  |   250 +
 .../clientpositive/spark/vectorized_ptf.q.out   |  2571 +-
 .../spark/vectorized_shufflejoin.q.out          |    18 +-
 .../spark/vectorized_timestamp_funcs.q.out      |    43 +-
 .../clientpositive/spark/windowing.q.out        |    54 +-
 .../test/results/clientpositive/stats10.q.out   |     1 +
 .../test/results/clientpositive/stats12.q.out   |     1 +
 .../test/results/clientpositive/stats13.q.out   |     2 +
 .../test/results/clientpositive/stats14.q.out   |     2 +
 .../test/results/clientpositive/stats15.q.out   |     2 +
 ql/src/test/results/clientpositive/stats2.q.out |     2 +
 ql/src/test/results/clientpositive/stats3.q.out |     1 +
 ql/src/test/results/clientpositive/stats4.q.out |     2 +
 ql/src/test/results/clientpositive/stats6.q.out |     1 +
 ql/src/test/results/clientpositive/stats7.q.out |     1 +
 ql/src/test/results/clientpositive/stats8.q.out |     2 +
 .../results/clientpositive/stats_noscan_1.q.out |     1 +
 .../test/results/clientpositive/structin.q.out  |     1 +
 .../subq_where_serialization.q.out              |   112 +-
 .../clientpositive/subquery_exists.q.out        |    43 +-
 .../clientpositive/subquery_in_having.q.out     |    81 +-
 .../clientpositive/subquery_notexists.q.out     |    20 +-
 .../subquery_notexists_having.q.out             |    24 +-
 .../clientpositive/subquery_notin_having.q.out  |   130 +-
 .../subquery_unqualcolumnrefs.q.out             |    76 +-
 .../temp_table_display_colstats_tbllvl.q.out    |    11 +-
 .../temp_table_windowing_expressions.q.out      |     4 +-
 .../clientpositive/tez/explainanalyze_2.q.out   |   230 +-
 .../clientpositive/tez/explainanalyze_3.q.out   |   127 +-
 .../clientpositive/tez/explainuser_3.q.out      |   152 +-
 .../clientpositive/tez/orc_merge12.q.out        |     4 +-
 .../clientpositive/tez/vector_aggregate_9.q.out |     4 -
 .../tez/vector_auto_smb_mapjoin_14.q.out        |    15 +-
 .../tez/vector_between_columns.q.out            |    18 +-
 .../clientpositive/tez/vector_between_in.q.out  |   102 +-
 .../tez/vector_binary_join_groupby.q.out        |    13 +-
 .../tez/vector_cast_constant.q.out              |     4 -
 .../clientpositive/tez/vector_char_2.q.out      |    12 +-
 .../tez/vector_char_mapjoin1.q.out              |    33 +-
 .../clientpositive/tez/vector_char_simple.q.out |     4 +-
 .../clientpositive/tez/vector_coalesce.q.out    |    20 +-
 .../clientpositive/tez/vector_coalesce_2.q.out  |     2 +-
 .../tez/vector_join_part_col_char.q.out         |    18 +-
 .../tez/vector_non_string_partition.q.out       |    18 +-
 .../clientpositive/tez/vectorization_div0.q.out |    18 +-
 .../tez/vectorization_limit.q.out               |   336 +-
 .../results/clientpositive/tez_join_hash.q.out  |     5 +
 .../results/clientpositive/tunable_ndv.q.out    |   220 +
 .../udaf_binarysetfunctions.q.out               |   464 +
 .../test/results/clientpositive/udaf_corr.q.out |    13 +-
 .../clientpositive/udaf_covar_samp.q.out        |    16 +-
 ql/src/test/results/clientpositive/udf1.q.out   |    32 +-
 ql/src/test/results/clientpositive/udf3.q.out   |    20 +-
 .../results/clientpositive/udf_between.q.out    |     4 +-
 .../clientpositive/udf_character_length.q.out   |   269 +
 .../test/results/clientpositive/udf_isops.q.out |   146 +
 .../results/clientpositive/udf_length.q.out     |     2 +-
 .../results/clientpositive/udf_likeall.q.out    |   187 +
 .../results/clientpositive/udf_likeany.q.out    |   187 +
 .../results/clientpositive/udf_nullif.q.out     |    37 +
 .../clientpositive/udf_octet_length.q.out       |   221 +
 .../clientpositive/udf_round_2_auto_stats.q.out |    55 +
 .../test/results/clientpositive/udf_trunc.q.out |   371 +-
 .../clientpositive/udf_width_bucket.q.out       |   680 +
 .../clientpositive/unicode_comments.q.out       |     1 +
 .../test/results/clientpositive/union17.q.out   |    40 +-
 .../test/results/clientpositive/union19.q.out   |    20 +-
 .../clientpositive/union_remove_15.q.out        |     1 +
 .../clientpositive/union_remove_16.q.out        |     1 +
 .../clientpositive/union_remove_17.q.out        |     1 +
 .../clientpositive/union_remove_18.q.out        |     1 +
 .../clientpositive/union_remove_19.q.out        |     4 +-
 .../clientpositive/updateAccessTime.q.out       |    16 +
 .../test/results/clientpositive/varchar_1.q.out |     4 +-
 .../clientpositive/vector_between_columns.q.out |     8 +-
 .../vector_binary_join_groupby.q.out            |   131 +-
 .../results/clientpositive/vector_bucket.q.out  |    21 +-
 .../clientpositive/vector_cast_constant.q.out   |    25 +-
 .../results/clientpositive/vector_char_2.q.out  |    50 +-
 .../clientpositive/vector_char_mapjoin1.q.out   |    12 +-
 .../clientpositive/vector_char_simple.q.out     |     4 +-
 .../clientpositive/vector_coalesce.q.out        |    20 +-
 .../clientpositive/vector_coalesce_2.q.out      |     8 +-
 .../results/clientpositive/vector_count.q.out   |    16 +-
 .../clientpositive/vector_data_types.q.out      |    36 +-
 .../clientpositive/vector_decimal_10_0.q.out    |    12 +-
 .../vector_decimal_aggregate.q.out              |    16 +-
 .../vector_decimal_expressions.q.out            |     4 +-
 .../clientpositive/vector_decimal_mapjoin.q.out |    28 +-
 .../vector_decimal_precision.q.out              |    52 +-
 .../clientpositive/vector_decimal_round.q.out   |   111 +-
 .../clientpositive/vector_decimal_round_2.q.out |   190 +-
 .../clientpositive/vector_distinct_2.q.out      |     4 +-
 .../clientpositive/vector_empty_where.q.out     |    16 +-
 .../clientpositive/vector_groupby4.q.out        |    21 +-
 .../clientpositive/vector_groupby6.q.out        |    21 +-
 .../clientpositive/vector_groupby_3.q.out       |    10 +-
 .../clientpositive/vector_groupby_mapjoin.q.out |   165 +-
 .../clientpositive/vector_groupby_reduce.q.out  |   128 +-
 .../clientpositive/vector_grouping_sets.q.out   |    51 +-
 .../results/clientpositive/vector_if_expr.q.out |     4 +-
 .../clientpositive/vector_include_no_sel.q.out  |     4 +-
 .../clientpositive/vector_interval_1.q.out      |    32 +-
 .../vector_interval_arithmetic.q.out            |    28 +-
 .../vector_interval_mapjoin.q.out               |     6 +-
 .../clientpositive/vector_mapjoin_reduce.q.out  |   112 +-
 .../vector_mr_diff_schema_alias.q.out           |    18 +-
 .../vector_non_constant_in_expr.q.out           |     4 +-
 .../vector_non_string_partition.q.out           |     8 +-
 .../clientpositive/vector_order_null.q.out      |  1096 +
 .../clientpositive/vector_orderby_5.q.out       |    27 +-
 .../clientpositive/vector_outer_join1.q.out     |     4 +-
 .../clientpositive/vector_outer_join2.q.out     |     4 +-
 .../clientpositive/vector_outer_join3.q.out     |     6 +-
 .../clientpositive/vector_outer_join4.q.out     |     6 +-
 .../clientpositive/vector_outer_join6.q.out     |     4 +-
 .../results/clientpositive/vector_reduce1.q.out |     4 +-
 .../results/clientpositive/vector_reduce2.q.out |     4 +-
 .../results/clientpositive/vector_reduce3.q.out |     4 +-
 .../vector_reduce_groupby_decimal.q.out         |    21 +-
 .../clientpositive/vector_string_concat.q.out   |    29 +-
 .../clientpositive/vector_string_decimal.q.out  |    23 +-
 .../vector_tablesample_rows.q.out               |    17 +-
 .../vector_udf_character_length.q.out           |   315 +
 .../vector_udf_octet_length.q.out               |   254 +
 .../clientpositive/vector_varchar_simple.q.out  |     4 +-
 .../clientpositive/vector_when_case_null.q.out  |     4 +-
 .../clientpositive/vectorization_13.q.out       |    46 +-
 .../clientpositive/vectorization_14.q.out       |     9 +-
 .../clientpositive/vectorization_15.q.out       |    19 +-
 .../clientpositive/vectorization_7.q.out        |     8 +-
 .../clientpositive/vectorization_8.q.out        |     8 +-
 .../clientpositive/vectorization_div0.q.out     |     8 +-
 .../clientpositive/vectorization_limit.q.out    |   308 +-
 .../vectorization_offset_limit.q.out            |     4 +-
 .../clientpositive/vectorized_case.q.out        |   222 +
 .../clientpositive/vectorized_date_funcs.q.out  |    21 +-
 .../clientpositive/vectorized_mapjoin2.q.out    |     4 +-
 .../vectorized_parquet_types.q.out              |    23 +-
 .../clientpositive/vectorized_shufflejoin.q.out |    23 +-
 .../clientpositive/vectorized_timestamp.q.out   |     4 +-
 .../vectorized_timestamp_funcs.q.out            |    20 +-
 .../clientpositive/windowing_distinct.q.out     |    16 +-
 .../clientpositive/windowing_expressions.q.out  |     4 +-
 .../results/clientpositive/windowing_gby2.q.out |    34 +-
 .../windowing_multipartitioning.q.out           |    12 +-
 .../clientpositive/windowing_navfn.q.out        |    12 +-
 .../clientpositive/windowing_ntile.q.out        |     8 +-
 .../clientpositive/windowing_order_null.q.out   |     4 +-
 .../windowing_range_multiorder.q.out            |     4 +-
 .../results/clientpositive/windowing_rank.q.out |    56 +-
 .../clientpositive/windowing_streaming.q.out    |     4 +-
 .../results/clientpositive/windowing_udaf.q.out |     4 +-
 .../clientpositive/windowing_windowspec.q.out   |   112 +-
 .../zero_rows_single_insert.q.out               |   113 +
 serde/pom.xml                                   |     2 +-
 .../hive/serde2/ColumnProjectionUtils.java      |     6 +-
 .../apache/hadoop/hive/serde2/SerDeUtils.java   |     2 +-
 .../apache/hadoop/hive/serde2/WriteBuffers.java |    25 +-
 .../hive/serde2/avro/AvroDeserializer.java      |     2 +-
 .../hadoop/hive/serde2/avro/AvroSerDe.java      |     6 +-
 .../fast/BinarySortableDeserializeRead.java     |    39 +-
 .../lazy/fast/LazySimpleDeserializeRead.java    |     4 +-
 .../hive/serde2/lazy/fast/StringToDouble.java   |    21 +-
 .../hive/serde2/lazybinary/LazyBinaryUtils.java |     2 +-
 .../objectinspector/ObjectInspectorFactory.java |    11 +-
 .../StandardStructObjectInspector.java          |     2 +-
 .../hive/serde2/typeinfo/TypeInfoUtils.java     |     2 +-
 .../hive/serde2/TestColumnProjectionUtils.java  |    14 +
 .../binarysortable/TestBinarySortableFast.java  |    13 +-
 service-rpc/pom.xml                             |     2 +-
 service/pom.xml                                 |   100 +-
 .../org/apache/hive/tmpl/QueryProfileTmpl.jamon |    78 +-
 .../hive/service/cli/JobProgressUpdate.java     |    17 +
 .../cli/ProgressMonitorStatusMapper.java        |    17 +
 .../cli/TezProgressMonitorStatusMapper.java     |    17 +
 .../cli/operation/HiveCommandOperation.java     |     8 +-
 .../cli/operation/LogDivertAppender.java        |   249 -
 .../hive/service/cli/operation/Operation.java   |    73 +-
 .../service/cli/operation/OperationManager.java |    79 +-
 .../service/cli/operation/QueryInfoCache.java   |    41 +
 .../service/cli/operation/SQLOperation.java     |    73 +-
 .../cli/operation/SQLOperationDisplay.java      |   108 -
 .../cli/operation/SQLOperationDisplayCache.java |    39 -
 .../service/cli/session/HiveSessionImpl.java    |    46 +-
 .../service/cli/session/SessionManager.java     |     4 +-
 .../cli/thrift/ThriftHttpCLIService.java        |    33 +-
 .../service/servlet/QueryProfileServlet.java    |     8 +-
 .../hive-webapps/hiveserver2/hiveserver2.jsp    |    18 +-
 .../TestQueryLifeTimeHooksWithSQLOperation.java |   115 +
 .../hive/service/server/TestHS2HttpServer.java  |     9 +-
 shims/0.23/pom.xml                              |     6 +-
 .../apache/hadoop/hive/shims/Hadoop23Shims.java |    16 +-
 shims/aggregator/pom.xml                        |     2 +-
 shims/common/pom.xml                            |     7 +-
 .../org/apache/hadoop/hive/io/HdfsUtils.java    |     4 +-
 .../hive/io/HiveIOExceptionHandlerChain.java    |     2 +-
 .../hive/io/HiveIOExceptionHandlerUtil.java     |     4 +-
 .../apache/hadoop/hive/shims/ShimLoader.java    |     2 +-
 shims/pom.xml                                   |     2 +-
 shims/scheduler/pom.xml                         |     8 +-
 spark-client/pom.xml                            |     6 +-
 .../hive/spark/client/SparkClientUtilities.java |     3 +-
 .../org/apache/hive/spark/client/rpc/Rpc.java   |     3 +-
 .../apache/hive/spark/client/rpc/RpcServer.java |     2 +-
 storage-api/LICENSE                             |   203 +
 storage-api/NOTICE                              |     6 +
 storage-api/pom.xml                             |     4 +-
 .../hive/common/type/FastHiveDecimalImpl.java   |   143 +-
 .../hadoop/hive/common/type/RandomTypeUtil.java |    10 +-
 .../hive/ql/exec/vector/VectorizedRowBatch.java |    42 +
 .../hadoop/hive/ql/util/JavaDataModel.java      |    26 +-
 .../org/apache/hive/common/util/Murmur3.java    |    98 +-
 .../ql/exec/vector/TestStructColumnVector.java  |     3 +-
 .../apache/hive/common/util/TestMurmur3.java    |    24 +
 testutils/pom.xml                               |     2 +-
 .../ptest2/conf/cloudhost.properties.example    |    19 +-
 .../hive/testutils/jdbc/HiveBurnInClient.java   |     4 +-
 vector-code-gen/pom.xml                         |     2 +-
 2029 files changed, 175274 insertions(+), 78857 deletions(-)
----------------------------------------------------------------------



[23/50] [abbrv] hive git commit: HIVE-16275: Vectorization: Add ReduceSink support for TopN (in specialized native classes) (Matt McCline, reviewed by Gopal Vijayaraghavan)

Posted by we...@apache.org.
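[Editor's note] The golden-file diffs below all make the same substitution: the native-condition label "No TopN" becomes "No PTF TopN", and reduce sinks that carry a TopN (a pushed-down LIMIT) now report the native VectorReduceSinkObjectHashOperator instead of falling back to the row-mode VectorReduceSinkOperator. A minimal illustrative sketch of the eligibility change follows; the class and field names are hypothetical and are not Hive's actual operator classes, they only mirror the before/after rule visible in the diffs.

    // Illustrative sketch only -- not the real Hive classes (names are invented).
    // Before HIVE-16275 any TopN on a ReduceSink blocked the specialized native
    // path ("No TopN IS false"); afterwards only a PTF-style TopN blocks it,
    // so ORDER BY ... LIMIT sinks can use the ObjectHash native operator.
    public class NativeReduceSinkCheckSketch {

      // Hypothetical descriptor holding the two properties the check cares about.
      static final class ReduceSinkInfo {
        final boolean hasTopN;     // a LIMIT pushed into the sink
        final boolean isPtfTopN;   // TopN produced for a PTF/windowing operator

        ReduceSinkInfo(boolean hasTopN, boolean isPtfTopN) {
          this.hasTopN = hasTopN;
          this.isPtfTopN = isPtfTopN;
        }
      }

      // Old rule: any TopN disables the specialized native operator.
      static boolean nativeEligibleBefore(ReduceSinkInfo rs) {
        return !rs.hasTopN;
      }

      // New rule: only PTF TopN disables it; ordinary TopN is now supported.
      static boolean nativeEligibleAfter(ReduceSinkInfo rs) {
        return !rs.isPtfTopN;
      }

      public static void main(String[] args) {
        ReduceSinkInfo orderByLimit = new ReduceSinkInfo(true, false);
        System.out.println("before: " + nativeEligibleBefore(orderByLimit)); // false
        System.out.println("after:  " + nativeEligibleAfter(orderByLimit));  // true
      }
    }

This is why q.out files such as vector_non_string_partition.q.out flip from "native: false ... nativeConditionsNotMet: No TopN IS false" to "native: true ... No PTF TopN IS true" in the hunks that follow.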
http://git-wip-us.apache.org/repos/asf/hive/blob/ec8c390e/ql/src/test/results/clientpositive/spark/vectorized_ptf.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/vectorized_ptf.q.out b/ql/src/test/results/clientpositive/spark/vectorized_ptf.q.out
index 3e6a73f..64767d5 100644
--- a/ql/src/test/results/clientpositive/spark/vectorized_ptf.q.out
+++ b/ql/src/test/results/clientpositive/spark/vectorized_ptf.q.out
@@ -155,7 +155,7 @@ STAGE PLANS:
                     Reduce Sink Vectorization:
                         className: VectorReduceSinkObjectHashOperator
                         native: true
-                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                     Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE Column stats: NONE
                     value expressions: p_size (type: int), p_retailprice (type: double)
             Execution mode: vectorized
@@ -367,7 +367,7 @@ STAGE PLANS:
                       Reduce Sink Vectorization:
                           className: VectorReduceSinkObjectHashOperator
                           native: true
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                       Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE Column stats: NONE
                       value expressions: p_name (type: string), p_mfgr (type: string), p_size (type: int)
             Execution mode: vectorized
@@ -406,7 +406,7 @@ STAGE PLANS:
                       Reduce Sink Vectorization:
                           className: VectorReduceSinkObjectHashOperator
                           native: true
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                       Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE Column stats: NONE
             Execution mode: vectorized
             Map Vectorization:
@@ -610,7 +610,7 @@ STAGE PLANS:
                     Reduce Sink Vectorization:
                         className: VectorReduceSinkObjectHashOperator
                         native: true
-                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                     Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE Column stats: NONE
                     value expressions: p_size (type: int)
             Execution mode: vectorized
@@ -761,7 +761,7 @@ STAGE PLANS:
                     Reduce Sink Vectorization:
                         className: VectorReduceSinkObjectHashOperator
                         native: true
-                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                     Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE Column stats: NONE
                     value expressions: p_size (type: int), p_retailprice (type: double)
             Execution mode: vectorized
@@ -971,7 +971,7 @@ STAGE PLANS:
                     Reduce Sink Vectorization:
                         className: VectorReduceSinkObjectHashOperator
                         native: true
-                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                     Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE Column stats: NONE
                     value expressions: p_size (type: int)
             Execution mode: vectorized
@@ -1184,7 +1184,7 @@ STAGE PLANS:
                     Reduce Sink Vectorization:
                         className: VectorReduceSinkObjectHashOperator
                         native: true
-                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                     Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE Column stats: NONE
                     value expressions: p_size (type: int)
             Execution mode: vectorized
@@ -1399,7 +1399,7 @@ STAGE PLANS:
                     Reduce Sink Vectorization:
                         className: VectorReduceSinkObjectHashOperator
                         native: true
-                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                     Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE Column stats: NONE
                     value expressions: p_partkey (type: int), p_brand (type: string), p_type (type: string), p_size (type: int), p_container (type: string), p_retailprice (type: double), p_comment (type: string)
             Execution mode: vectorized
@@ -1438,7 +1438,7 @@ STAGE PLANS:
                       Reduce Sink Vectorization:
                           className: VectorReduceSinkObjectHashOperator
                           native: true
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                       Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE Column stats: NONE
             Execution mode: vectorized
             Map Vectorization:
@@ -1613,7 +1613,7 @@ STAGE PLANS:
                       Reduce Sink Vectorization:
                           className: VectorReduceSinkObjectHashOperator
                           native: true
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                       Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE Column stats: NONE
             Execution mode: vectorized
             Map Vectorization:
@@ -1644,7 +1644,7 @@ STAGE PLANS:
                     Reduce Sink Vectorization:
                         className: VectorReduceSinkObjectHashOperator
                         native: true
-                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                     Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE Column stats: NONE
                     value expressions: p_partkey (type: int), p_brand (type: string), p_type (type: string), p_size (type: int), p_container (type: string), p_retailprice (type: double), p_comment (type: string)
             Execution mode: vectorized
@@ -2214,7 +2214,7 @@ STAGE PLANS:
                     Reduce Sink Vectorization:
                         className: VectorReduceSinkObjectHashOperator
                         native: true
-                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                     Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE Column stats: NONE
                     value expressions: p_size (type: int), p_retailprice (type: double)
             Execution mode: vectorized
@@ -2423,7 +2423,7 @@ STAGE PLANS:
                     Reduce Sink Vectorization:
                         className: VectorReduceSinkObjectHashOperator
                         native: true
-                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                     Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE Column stats: NONE
                     value expressions: p_size (type: int), p_retailprice (type: double)
             Execution mode: vectorized
@@ -2701,7 +2701,7 @@ STAGE PLANS:
                     Reduce Sink Vectorization:
                         className: VectorReduceSinkObjectHashOperator
                         native: true
-                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                     Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE Column stats: NONE
                     value expressions: p_size (type: int), p_retailprice (type: double)
             Execution mode: vectorized
@@ -2914,7 +2914,7 @@ STAGE PLANS:
                     Reduce Sink Vectorization:
                         className: VectorReduceSinkObjectHashOperator
                         native: true
-                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                     Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE Column stats: NONE
                     value expressions: p_partkey (type: int), p_size (type: int), p_retailprice (type: double)
             Execution mode: vectorized
@@ -2953,7 +2953,7 @@ STAGE PLANS:
                       Reduce Sink Vectorization:
                           className: VectorReduceSinkObjectHashOperator
                           native: true
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                       Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE Column stats: NONE
             Execution mode: vectorized
             Map Vectorization:
@@ -3197,7 +3197,7 @@ STAGE PLANS:
                     Reduce Sink Vectorization:
                         className: VectorReduceSinkObjectHashOperator
                         native: true
-                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                     Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE Column stats: NONE
                     value expressions: p_size (type: int)
             Execution mode: vectorized
@@ -3425,7 +3425,7 @@ STAGE PLANS:
                         Reduce Sink Vectorization:
                             className: VectorReduceSinkObjectHashOperator
                             native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col2 (type: double)
             Execution mode: vectorized
@@ -3677,7 +3677,7 @@ STAGE PLANS:
                     Reduce Sink Vectorization:
                         className: VectorReduceSinkObjectHashOperator
                         native: true
-                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                     Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE Column stats: NONE
                     value expressions: p_size (type: int), p_retailprice (type: double)
             Execution mode: vectorized
@@ -4125,7 +4125,7 @@ STAGE PLANS:
                     Reduce Sink Vectorization:
                         className: VectorReduceSinkObjectHashOperator
                         native: true
-                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                     Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE Column stats: NONE
                     value expressions: p_name (type: string), p_size (type: int)
             Execution mode: vectorized
@@ -4434,7 +4434,7 @@ STAGE PLANS:
                     Reduce Sink Vectorization:
                         className: VectorReduceSinkObjectHashOperator
                         native: true
-                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                     Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE Column stats: NONE
                     value expressions: p_name (type: string), p_size (type: int)
             Execution mode: vectorized
@@ -4738,7 +4738,7 @@ STAGE PLANS:
                     Reduce Sink Vectorization:
                         className: VectorReduceSinkObjectHashOperator
                         native: true
-                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                     Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE Column stats: NONE
                     value expressions: p_size (type: int)
             Execution mode: vectorized
@@ -5019,7 +5019,7 @@ STAGE PLANS:
                     Reduce Sink Vectorization:
                         className: VectorReduceSinkObjectHashOperator
                         native: true
-                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                     Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE Column stats: NONE
                     value expressions: p_size (type: int)
             Execution mode: vectorized
@@ -5342,7 +5342,7 @@ STAGE PLANS:
                     Reduce Sink Vectorization:
                         className: VectorReduceSinkObjectHashOperator
                         native: true
-                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                     Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE Column stats: NONE
                     value expressions: p_size (type: int)
             Execution mode: vectorized
@@ -5635,7 +5635,7 @@ STAGE PLANS:
                     Reduce Sink Vectorization:
                         className: VectorReduceSinkObjectHashOperator
                         native: true
-                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                     Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE Column stats: NONE
                     value expressions: p_size (type: int)
             Execution mode: vectorized

http://git-wip-us.apache.org/repos/asf/hive/blob/ec8c390e/ql/src/test/results/clientpositive/spark/vectorized_shufflejoin.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/vectorized_shufflejoin.q.out b/ql/src/test/results/clientpositive/spark/vectorized_shufflejoin.q.out
index adf6e65..b6e7519 100644
--- a/ql/src/test/results/clientpositive/spark/vectorized_shufflejoin.q.out
+++ b/ql/src/test/results/clientpositive/spark/vectorized_shufflejoin.q.out
@@ -53,7 +53,7 @@ STAGE PLANS:
                         Reduce Sink Vectorization:
                             className: VectorReduceSinkObjectHashOperator
                             native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE
             Execution mode: vectorized
             Map Vectorization:
@@ -94,7 +94,7 @@ STAGE PLANS:
                         Reduce Sink Vectorization:
                             className: VectorReduceSinkObjectHashOperator
                             native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE
             Execution mode: vectorized
             Map Vectorization:

http://git-wip-us.apache.org/repos/asf/hive/blob/ec8c390e/ql/src/test/results/clientpositive/spark/vectorized_timestamp_funcs.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/vectorized_timestamp_funcs.q.out b/ql/src/test/results/clientpositive/spark/vectorized_timestamp_funcs.q.out
index 3892165..fe23597 100644
--- a/ql/src/test/results/clientpositive/spark/vectorized_timestamp_funcs.q.out
+++ b/ql/src/test/results/clientpositive/spark/vectorized_timestamp_funcs.q.out
@@ -127,7 +127,7 @@ STAGE PLANS:
                       Reduce Sink Vectorization:
                           className: VectorReduceSinkObjectHashOperator
                           native: true
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                       Statistics: Num rows: 40 Data size: 84 Basic stats: COMPLETE Column stats: NONE
                       value expressions: _col1 (type: int), _col2 (type: int), _col3 (type: int), _col4 (type: int), _col5 (type: int), _col6 (type: int), _col7 (type: int), _col8 (type: int)
             Execution mode: vectorized
@@ -308,7 +308,7 @@ STAGE PLANS:
                       Reduce Sink Vectorization:
                           className: VectorReduceSinkObjectHashOperator
                           native: true
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                       Statistics: Num rows: 40 Data size: 84 Basic stats: COMPLETE Column stats: NONE
                       value expressions: _col1 (type: int), _col2 (type: int), _col3 (type: int), _col4 (type: int), _col5 (type: int), _col6 (type: int), _col7 (type: int), _col8 (type: int)
             Execution mode: vectorized
@@ -489,7 +489,7 @@ STAGE PLANS:
                       Reduce Sink Vectorization:
                           className: VectorReduceSinkObjectHashOperator
                           native: true
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                       Statistics: Num rows: 40 Data size: 84 Basic stats: COMPLETE Column stats: NONE
                       value expressions: _col1 (type: boolean), _col2 (type: boolean), _col3 (type: boolean), _col4 (type: boolean), _col5 (type: boolean), _col6 (type: boolean), _col7 (type: boolean), _col8 (type: boolean)
             Execution mode: vectorized
@@ -670,7 +670,7 @@ STAGE PLANS:
                       Reduce Sink Vectorization:
                           className: VectorReduceSinkObjectHashOperator
                           native: true
-                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                          nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                       Statistics: Num rows: 3 Data size: 294 Basic stats: COMPLETE Column stats: NONE
                       value expressions: _col1 (type: int), _col2 (type: int), _col3 (type: int), _col4 (type: int), _col5 (type: int), _col6 (type: int), _col7 (type: int), _col8 (type: int)
             Execution mode: vectorized
@@ -811,7 +811,7 @@ STAGE PLANS:
                         Reduce Sink Vectorization:
                             className: VectorReduceSinkObjectHashOperator
                             native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 1 Data size: 96 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col0 (type: timestamp), _col1 (type: timestamp), _col2 (type: bigint), _col3 (type: bigint)
             Execution mode: vectorized

http://git-wip-us.apache.org/repos/asf/hive/blob/ec8c390e/ql/src/test/results/clientpositive/tez/vector_non_string_partition.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/vector_non_string_partition.q.out b/ql/src/test/results/clientpositive/tez/vector_non_string_partition.q.out
index 647a095..e09cbb9 100644
--- a/ql/src/test/results/clientpositive/tez/vector_non_string_partition.q.out
+++ b/ql/src/test/results/clientpositive/tez/vector_non_string_partition.q.out
@@ -74,10 +74,9 @@ STAGE PLANS:
                         key expressions: _col0 (type: int)
                         sort order: +
                         Reduce Sink Vectorization:
-                            className: VectorReduceSinkOperator
-                            native: false
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                            nativeConditionsNotMet: No TopN IS false
+                            className: VectorReduceSinkObjectHashOperator
+                            native: true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 1024 Data size: 4096 Basic stats: COMPLETE Column stats: PARTIAL
                         TopN Hash Memory Usage: 0.1
                         value expressions: _col1 (type: tinyint)
@@ -87,7 +86,7 @@ STAGE PLANS:
                 enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
                 groupByVectorOutput: true
                 inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
+                allNative: true
                 usesVectorUDFAdaptor: false
                 vectorized: true
         Reducer 2 
@@ -198,10 +197,9 @@ STAGE PLANS:
                         key expressions: _col0 (type: int), _col1 (type: string)
                         sort order: ++
                         Reduce Sink Vectorization:
-                            className: VectorReduceSinkOperator
-                            native: false
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                            nativeConditionsNotMet: No TopN IS false
+                            className: VectorReduceSinkObjectHashOperator
+                            native: true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 1024 Data size: 121205 Basic stats: COMPLETE Column stats: NONE
                         TopN Hash Memory Usage: 0.1
             Execution mode: vectorized
@@ -210,7 +208,7 @@ STAGE PLANS:
                 enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
                 groupByVectorOutput: true
                 inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
+                allNative: true
                 usesVectorUDFAdaptor: false
                 vectorized: true
         Reducer 2 

http://git-wip-us.apache.org/repos/asf/hive/blob/ec8c390e/ql/src/test/results/clientpositive/tez/vectorization_div0.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/vectorization_div0.q.out b/ql/src/test/results/clientpositive/tez/vectorization_div0.q.out
index cb7dc90..bcf315e 100644
--- a/ql/src/test/results/clientpositive/tez/vectorization_div0.q.out
+++ b/ql/src/test/results/clientpositive/tez/vectorization_div0.q.out
@@ -226,10 +226,9 @@ STAGE PLANS:
                         key expressions: _col0 (type: bigint), _col1 (type: double)
                         sort order: ++
                         Reduce Sink Vectorization:
-                            className: VectorReduceSinkOperator
-                            native: false
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                            nativeConditionsNotMet: No TopN IS false
+                            className: VectorReduceSinkObjectHashOperator
+                            native: true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 1365 Data size: 174720 Basic stats: COMPLETE Column stats: COMPLETE
                         TopN Hash Memory Usage: 0.1
                         value expressions: _col2 (type: decimal(22,21))
@@ -239,7 +238,7 @@ STAGE PLANS:
                 enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
                 groupByVectorOutput: true
                 inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
+                allNative: true
                 usesVectorUDFAdaptor: false
                 vectorized: true
         Reducer 2 
@@ -445,10 +444,9 @@ STAGE PLANS:
                         key expressions: _col0 (type: double), _col1 (type: double)
                         sort order: ++
                         Reduce Sink Vectorization:
-                            className: VectorReduceSinkOperator
-                            native: false
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                            nativeConditionsNotMet: No TopN IS false
+                            className: VectorReduceSinkObjectHashOperator
+                            native: true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 1365 Data size: 65520 Basic stats: COMPLETE Column stats: COMPLETE
                         TopN Hash Memory Usage: 0.1
                         value expressions: _col2 (type: double), _col4 (type: double), _col5 (type: double)
@@ -458,7 +456,7 @@ STAGE PLANS:
                 enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
                 groupByVectorOutput: true
                 inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
+                allNative: true
                 usesVectorUDFAdaptor: false
                 vectorized: true
         Reducer 2 

http://git-wip-us.apache.org/repos/asf/hive/blob/ec8c390e/ql/src/test/results/clientpositive/tez/vectorization_limit.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/vectorization_limit.q.out b/ql/src/test/results/clientpositive/tez/vectorization_limit.q.out
index 2993eb9..b1d6c1c 100644
--- a/ql/src/test/results/clientpositive/tez/vectorization_limit.q.out
+++ b/ql/src/test/results/clientpositive/tez/vectorization_limit.q.out
@@ -63,17 +63,17 @@ POSTHOOK: query: SELECT cbigint, cdouble FROM alltypesorc WHERE cbigint < cdoubl
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@alltypesorc
 #### A masked pattern was here ####
--1887561756	1839.0
 -1887561756	-10011.0
 -1887561756	-13877.0
--1887561756	10361.0
--1887561756	-8881.0
 -1887561756	-2281.0
+-1887561756	-8881.0
+-1887561756	10361.0
+-1887561756	1839.0
 -1887561756	9531.0
-PREHOOK: query: explain vectorization expression
+PREHOOK: query: explain vectorization detail
 select ctinyint,cdouble,csmallint from alltypesorc where ctinyint is not null order by ctinyint,cdouble limit 20
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization expression
+POSTHOOK: query: explain vectorization detail
 select ctinyint,cdouble,csmallint from alltypesorc where ctinyint is not null order by ctinyint,cdouble limit 20
 POSTHOOK: type: QUERY
 PLAN VECTORIZATION:
@@ -119,10 +119,9 @@ STAGE PLANS:
                         key expressions: _col0 (type: tinyint), _col1 (type: double)
                         sort order: ++
                         Reduce Sink Vectorization:
-                            className: VectorReduceSinkOperator
-                            native: false
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                            nativeConditionsNotMet: No TopN IS false
+                            className: VectorReduceSinkObjectHashOperator
+                            native: true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 9173 Data size: 109584 Basic stats: COMPLETE Column stats: COMPLETE
                         TopN Hash Memory Usage: 0.3
                         value expressions: _col2 (type: smallint)
@@ -132,18 +131,29 @@ STAGE PLANS:
                 enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
                 groupByVectorOutput: true
                 inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
+                allNative: true
                 usesVectorUDFAdaptor: false
                 vectorized: true
+                rowBatchContext:
+                    dataColumnCount: 12
+                    includeColumns: [0, 1, 5]
+                    dataColumns: ctinyint:tinyint, csmallint:smallint, cint:int, cbigint:bigint, cfloat:float, cdouble:double, cstring1:string, cstring2:string, ctimestamp1:timestamp, ctimestamp2:timestamp, cboolean1:boolean, cboolean2:boolean
+                    partitionColumnCount: 0
         Reducer 2 
             Execution mode: vectorized
             Reduce Vectorization:
                 enabled: true
                 enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
+                reduceColumnNullOrder: aa
+                reduceColumnSortOrder: ++
                 groupByVectorOutput: true
                 allNative: false
                 usesVectorUDFAdaptor: false
                 vectorized: true
+                rowBatchContext:
+                    dataColumnCount: 3
+                    dataColumns: KEY.reducesinkkey0:tinyint, KEY.reducesinkkey1:double, VALUE._col0:smallint
+                    partitionColumnCount: 0
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey0 (type: tinyint), KEY.reducesinkkey1 (type: double), VALUE._col0 (type: smallint)
@@ -184,10 +194,17 @@ POSTHOOK: query: select ctinyint,cdouble,csmallint from alltypesorc where ctinyi
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@alltypesorc
 #### A masked pattern was here ####
--64	-15920.0	-15920
 -64	-10462.0	-10462
--64	-9842.0	-9842
--64	-8080.0	-8080
+-64	-15920.0	-15920
+-64	-1600.0	-1600
+-64	-200.0	-200
+-64	-2919.0	-2919
+-64	-3097.0	-3097
+-64	-3586.0	-3586
+-64	-4018.0	-4018
+-64	-4040.0	-4040
+-64	-4803.0	-4803
+-64	-6907.0	-6907
 -64	-7196.0	-7196
 -64	-7196.0	-7196
 -64	-7196.0	-7196
@@ -195,19 +212,12 @@ POSTHOOK: Input: default@alltypesorc
 -64	-7196.0	-7196
 -64	-7196.0	-7196
 -64	-7196.0	-7196
--64	-6907.0	-6907
--64	-4803.0	-4803
--64	-4040.0	-4040
--64	-4018.0	-4018
--64	-3586.0	-3586
--64	-3097.0	-3097
--64	-2919.0	-2919
--64	-1600.0	-1600
--64	-200.0	-200
-PREHOOK: query: explain vectorization expression
+-64	-8080.0	-8080
+-64	-9842.0	-9842
+PREHOOK: query: explain vectorization detail
 select ctinyint,avg(cdouble + 1) from alltypesorc group by ctinyint order by ctinyint limit 20
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization expression
+POSTHOOK: query: explain vectorization detail
 select ctinyint,avg(cdouble + 1) from alltypesorc group by ctinyint order by ctinyint limit 20
 POSTHOOK: type: QUERY
 PLAN VECTORIZATION:
@@ -273,6 +283,12 @@ STAGE PLANS:
                 allNative: false
                 usesVectorUDFAdaptor: false
                 vectorized: true
+                rowBatchContext:
+                    dataColumnCount: 12
+                    includeColumns: [0, 5]
+                    dataColumns: ctinyint:tinyint, csmallint:smallint, cint:int, cbigint:bigint, cfloat:float, cdouble:double, cstring1:string, cstring2:string, ctimestamp1:timestamp, ctimestamp2:timestamp, cboolean1:boolean, cboolean2:boolean
+                    partitionColumnCount: 0
+                    scratchColumnTypeNames: double
         Reducer 2 
             Reduce Vectorization:
                 enabled: true
@@ -311,30 +327,30 @@ POSTHOOK: query: select ctinyint,avg(cdouble + 1) from alltypesorc group by ctin
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@alltypesorc
 #### A masked pattern was here ####
-NULL	9370.0945309795
--64	373.52941176470586
--63	2178.7272727272725
--62	245.69387755102042
--61	914.3404255319149
--60	1071.82
--59	318.27272727272725
--58	3483.2444444444445
--57	1867.0535714285713
--56	2595.818181818182
--55	2385.595744680851
--54	2712.7272727272725
--53	-532.7567567567568
--52	2810.705882352941
--51	-96.46341463414635
--50	-960.0192307692307
--49	768.7659574468086
--48	1672.909090909091
--47	-574.6428571428571
 -46	3033.55
-PREHOOK: query: explain vectorization expression
+-47	-574.6428571428571
+-48	1672.909090909091
+-49	768.7659574468086
+-50	-960.0192307692307
+-51	-96.46341463414635
+-52	2810.705882352941
+-53	-532.7567567567568
+-54	2712.7272727272725
+-55	2385.595744680851
+-56	2595.818181818182
+-57	1867.0535714285713
+-58	3483.2444444444445
+-59	318.27272727272725
+-60	1071.82
+-61	914.3404255319149
+-62	245.69387755102042
+-63	2178.7272727272725
+-64	373.52941176470586
+NULL	9370.0945309795
+PREHOOK: query: explain vectorization detail
 select distinct(ctinyint) from alltypesorc limit 20
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization expression
+POSTHOOK: query: explain vectorization detail
 select distinct(ctinyint) from alltypesorc limit 20
 POSTHOOK: type: QUERY
 PLAN VECTORIZATION:
@@ -385,10 +401,9 @@ STAGE PLANS:
                         sort order: +
                         Map-reduce partition columns: _col0 (type: tinyint)
                         Reduce Sink Vectorization:
-                            className: VectorReduceSinkOperator
-                            native: false
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                            nativeConditionsNotMet: No TopN IS false
+                            className: VectorReduceSinkLongOperator
+                            native: true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 95 Data size: 288 Basic stats: COMPLETE Column stats: COMPLETE
                         TopN Hash Memory Usage: 0.3
             Execution mode: vectorized
@@ -400,15 +415,26 @@ STAGE PLANS:
                 allNative: false
                 usesVectorUDFAdaptor: false
                 vectorized: true
+                rowBatchContext:
+                    dataColumnCount: 12
+                    includeColumns: [0]
+                    dataColumns: ctinyint:tinyint, csmallint:smallint, cint:int, cbigint:bigint, cfloat:float, cdouble:double, cstring1:string, cstring2:string, ctimestamp1:timestamp, ctimestamp2:timestamp, cboolean1:boolean, cboolean2:boolean
+                    partitionColumnCount: 0
         Reducer 2 
             Execution mode: vectorized
             Reduce Vectorization:
                 enabled: true
                 enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
+                reduceColumnNullOrder: a
+                reduceColumnSortOrder: +
                 groupByVectorOutput: true
                 allNative: false
                 usesVectorUDFAdaptor: false
                 vectorized: true
+                rowBatchContext:
+                    dataColumnCount: 1
+                    dataColumns: KEY._col0:tinyint
+                    partitionColumnCount: 0
             Reduce Operator Tree:
               Group By Operator
                 Group By Vectorization:
@@ -452,30 +478,30 @@ POSTHOOK: query: select distinct(ctinyint) from alltypesorc limit 20
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@alltypesorc
 #### A masked pattern was here ####
-NULL
--64
--63
--62
--61
--60
--59
--58
--57
--56
--55
--54
--53
--52
--51
--50
--49
--48
--47
 -46
-PREHOOK: query: explain vectorization expression
+-47
+-48
+-49
+-50
+-51
+-52
+-53
+-54
+-55
+-56
+-57
+-58
+-59
+-60
+-61
+-62
+-63
+-64
+NULL
+PREHOOK: query: explain vectorization detail
 select ctinyint, count(distinct(cdouble)) from alltypesorc group by ctinyint order by ctinyint limit 20
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization expression
+POSTHOOK: query: explain vectorization detail
 select ctinyint, count(distinct(cdouble)) from alltypesorc group by ctinyint order by ctinyint limit 20
 POSTHOOK: type: QUERY
 PLAN VECTORIZATION:
@@ -528,7 +554,7 @@ STAGE PLANS:
                         Reduce Sink Vectorization:
                             className: VectorReduceSinkObjectHashOperator
                             native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 6144 Data size: 55052 Basic stats: COMPLETE Column stats: COMPLETE
             Execution mode: vectorized
             Map Vectorization:
@@ -539,15 +565,26 @@ STAGE PLANS:
                 allNative: false
                 usesVectorUDFAdaptor: false
                 vectorized: true
+                rowBatchContext:
+                    dataColumnCount: 12
+                    includeColumns: [0, 5]
+                    dataColumns: ctinyint:tinyint, csmallint:smallint, cint:int, cbigint:bigint, cfloat:float, cdouble:double, cstring1:string, cstring2:string, ctimestamp1:timestamp, ctimestamp2:timestamp, cboolean1:boolean, cboolean2:boolean
+                    partitionColumnCount: 0
         Reducer 2 
             Execution mode: vectorized
             Reduce Vectorization:
                 enabled: true
                 enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
+                reduceColumnNullOrder: aa
+                reduceColumnSortOrder: ++
                 groupByVectorOutput: true
                 allNative: false
                 usesVectorUDFAdaptor: false
                 vectorized: true
+                rowBatchContext:
+                    dataColumnCount: 2
+                    dataColumns: KEY._col0:tinyint, KEY._col1:double
+                    partitionColumnCount: 0
             Reduce Operator Tree:
               Group By Operator
                 Group By Vectorization:
@@ -604,30 +641,30 @@ POSTHOOK: query: select ctinyint, count(distinct(cdouble)) from alltypesorc grou
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@alltypesorc
 #### A masked pattern was here ####
-NULL	2932
--64	24
--63	19
--62	27
--61	25
--60	27
--59	31
--58	23
--57	35
--56	36
--55	29
--54	26
--53	22
--52	33
--51	21
--50	30
--49	26
--48	29
--47	22
 -46	24
-PREHOOK: query: explain vectorization expression
+-47	22
+-48	29
+-49	26
+-50	30
+-51	21
+-52	33
+-53	22
+-54	26
+-55	29
+-56	36
+-57	35
+-58	23
+-59	31
+-60	27
+-61	25
+-62	27
+-63	19
+-64	24
+NULL	2932
+PREHOOK: query: explain vectorization detail
 select ctinyint,cdouble from alltypesorc order by ctinyint limit 0
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization expression
+POSTHOOK: query: explain vectorization detail
 select ctinyint,cdouble from alltypesorc order by ctinyint limit 0
 POSTHOOK: type: QUERY
 PLAN VECTORIZATION:
@@ -652,10 +689,10 @@ POSTHOOK: query: select ctinyint,cdouble from alltypesorc order by ctinyint limi
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@alltypesorc
 #### A masked pattern was here ####
-PREHOOK: query: explain vectorization expression
+PREHOOK: query: explain vectorization detail
 select cdouble, sum(ctinyint) as sum from alltypesorc where ctinyint is not null group by cdouble order by sum, cdouble limit 20
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization expression
+POSTHOOK: query: explain vectorization detail
 select cdouble, sum(ctinyint) as sum from alltypesorc where ctinyint is not null group by cdouble order by sum, cdouble limit 20
 POSTHOOK: type: QUERY
 PLAN VECTORIZATION:
@@ -710,7 +747,7 @@ STAGE PLANS:
                         Reduce Sink Vectorization:
                             className: VectorReduceSinkMultiKeyOperator
                             native: true
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         Statistics: Num rows: 3185 Data size: 44512 Basic stats: COMPLETE Column stats: COMPLETE
                         value expressions: _col1 (type: bigint)
             Execution mode: vectorized
@@ -722,15 +759,26 @@ STAGE PLANS:
                 allNative: false
                 usesVectorUDFAdaptor: false
                 vectorized: true
+                rowBatchContext:
+                    dataColumnCount: 12
+                    includeColumns: [0, 5]
+                    dataColumns: ctinyint:tinyint, csmallint:smallint, cint:int, cbigint:bigint, cfloat:float, cdouble:double, cstring1:string, cstring2:string, ctimestamp1:timestamp, ctimestamp2:timestamp, cboolean1:boolean, cboolean2:boolean
+                    partitionColumnCount: 0
         Reducer 2 
             Execution mode: vectorized
             Reduce Vectorization:
                 enabled: true
                 enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
+                reduceColumnNullOrder: a
+                reduceColumnSortOrder: +
                 groupByVectorOutput: true
                 allNative: false
                 usesVectorUDFAdaptor: false
                 vectorized: true
+                rowBatchContext:
+                    dataColumnCount: 2
+                    dataColumns: KEY._col0:double, VALUE._col0:bigint
+                    partitionColumnCount: 0
             Reduce Operator Tree:
               Group By Operator
                 aggregations: sum(VALUE._col0)
@@ -749,10 +797,9 @@ STAGE PLANS:
                   key expressions: _col1 (type: bigint), _col0 (type: double)
                   sort order: ++
                   Reduce Sink Vectorization:
-                      className: VectorReduceSinkOperator
-                      native: false
-                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                      nativeConditionsNotMet: No TopN IS false
+                      className: VectorReduceSinkObjectHashOperator
+                      native: true
+                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                   Statistics: Num rows: 3185 Data size: 44512 Basic stats: COMPLETE Column stats: COMPLETE
                   TopN Hash Memory Usage: 0.3
         Reducer 3 
@@ -760,10 +807,16 @@ STAGE PLANS:
             Reduce Vectorization:
                 enabled: true
                 enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
+                reduceColumnNullOrder: aa
+                reduceColumnSortOrder: ++
                 groupByVectorOutput: true
                 allNative: false
                 usesVectorUDFAdaptor: false
                 vectorized: true
+                rowBatchContext:
+                    dataColumnCount: 2
+                    dataColumns: KEY.reducesinkkey0:bigint, KEY.reducesinkkey1:double
+                    partitionColumnCount: 0
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey1 (type: double), KEY.reducesinkkey0 (type: bigint)
@@ -804,23 +857,23 @@ POSTHOOK: query: select cdouble, sum(ctinyint) as sum from alltypesorc where cti
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@alltypesorc
 #### A masked pattern was here ####
-NULL	-32768
+-10462.0	-64
+-1121.0	-89
+-11322.0	-101
+-11492.0	-78
+-15920.0	-64
+-4803.0	-64
+-6907.0	-64
 -7196.0	-2009
+-8080.0	-64
+-8118.0	-80
+-9842.0	-64
+10496.0	-67
 15601.0	-1733
-4811.0	-115
--11322.0	-101
--1121.0	-89
-7705.0	-88
 3520.0	-86
--8118.0	-80
+4811.0	-115
 5241.0	-80
--11492.0	-78
-9452.0	-76
 557.0	-75
-10496.0	-67
--15920.0	-64
--10462.0	-64
--9842.0	-64
--8080.0	-64
--6907.0	-64
--4803.0	-64
+7705.0	-88
+9452.0	-76
+NULL	-32768

http://git-wip-us.apache.org/repos/asf/hive/blob/ec8c390e/ql/src/test/results/clientpositive/vector_binary_join_groupby.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_binary_join_groupby.q.out b/ql/src/test/results/clientpositive/vector_binary_join_groupby.q.out
index 6422bd5..3519a87 100644
--- a/ql/src/test/results/clientpositive/vector_binary_join_groupby.q.out
+++ b/ql/src/test/results/clientpositive/vector_binary_join_groupby.q.out
@@ -201,7 +201,7 @@ STAGE PLANS:
                         Reduce Sink Vectorization:
                             className: VectorReduceSinkOperator
                             native: false
-                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                             nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false
                         Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col0 (type: bigint)
@@ -250,7 +250,7 @@ STAGE PLANS:
               Reduce Sink Vectorization:
                   className: VectorReduceSinkOperator
                   native: false
-                  nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                  nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                   nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false
               Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
       Execution mode: vectorized
@@ -357,7 +357,7 @@ STAGE PLANS:
                   Reduce Sink Vectorization:
                       className: VectorReduceSinkOperator
                       native: false
-                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                       nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false
                   Statistics: Num rows: 100 Data size: 29638 Basic stats: COMPLETE Column stats: NONE
                   value expressions: _col1 (type: bigint)
@@ -409,7 +409,7 @@ STAGE PLANS:
               Reduce Sink Vectorization:
                   className: VectorReduceSinkOperator
                   native: false
-                  nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                  nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                   nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false
               Statistics: Num rows: 50 Data size: 14819 Basic stats: COMPLETE Column stats: NONE
               value expressions: _col0 (type: bigint)

http://git-wip-us.apache.org/repos/asf/hive/blob/ec8c390e/ql/src/test/results/clientpositive/vector_bucket.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_bucket.q.out b/ql/src/test/results/clientpositive/vector_bucket.q.out
index 0eeb8a5..3b74023 100644
--- a/ql/src/test/results/clientpositive/vector_bucket.q.out
+++ b/ql/src/test/results/clientpositive/vector_bucket.q.out
@@ -45,7 +45,7 @@ STAGE PLANS:
                 Reduce Sink Vectorization:
                     className: VectorReduceSinkOperator
                     native: false
-                    nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                    nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                     nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false
                 Statistics: Num rows: 1 Data size: 26 Basic stats: COMPLETE Column stats: NONE
                 value expressions: _col0 (type: string), _col1 (type: string)

http://git-wip-us.apache.org/repos/asf/hive/blob/ec8c390e/ql/src/test/results/clientpositive/vector_cast_constant.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_cast_constant.q.out b/ql/src/test/results/clientpositive/vector_cast_constant.q.out
index c323add..7afdb72 100644
--- a/ql/src/test/results/clientpositive/vector_cast_constant.q.out
+++ b/ql/src/test/results/clientpositive/vector_cast_constant.q.out
@@ -201,8 +201,8 @@ STAGE PLANS:
               Reduce Sink Vectorization:
                   className: VectorReduceSinkOperator
                   native: false
-                  nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                  nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false, No TopN IS false
+                  nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                  nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false
               Statistics: Num rows: 524 Data size: 155436 Basic stats: COMPLETE Column stats: NONE
               TopN Hash Memory Usage: 0.1
               value expressions: _col1 (type: double), _col2 (type: double), _col3 (type: decimal(14,4))

http://git-wip-us.apache.org/repos/asf/hive/blob/ec8c390e/ql/src/test/results/clientpositive/vector_char_2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_char_2.q.out b/ql/src/test/results/clientpositive/vector_char_2.q.out
index d4e5225..03bf436 100644
--- a/ql/src/test/results/clientpositive/vector_char_2.q.out
+++ b/ql/src/test/results/clientpositive/vector_char_2.q.out
@@ -107,8 +107,8 @@ STAGE PLANS:
                   Reduce Sink Vectorization:
                       className: VectorReduceSinkOperator
                       native: false
-                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                      nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false, No TopN IS false
+                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                      nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false
                   Statistics: Num rows: 500 Data size: 99000 Basic stats: COMPLETE Column stats: NONE
                   TopN Hash Memory Usage: 0.1
                   value expressions: _col1 (type: bigint), _col2 (type: bigint)
@@ -156,8 +156,8 @@ STAGE PLANS:
               Reduce Sink Vectorization:
                   className: VectorReduceSinkOperator
                   native: false
-                  nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                  nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false, No TopN IS false
+                  nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                  nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false
               Statistics: Num rows: 250 Data size: 49500 Basic stats: COMPLETE Column stats: NONE
               TopN Hash Memory Usage: 0.1
               value expressions: _col1 (type: bigint), _col2 (type: bigint)
@@ -298,8 +298,8 @@ STAGE PLANS:
                   Reduce Sink Vectorization:
                       className: VectorReduceSinkOperator
                       native: false
-                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                      nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false, No TopN IS false
+                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                      nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false
                   Statistics: Num rows: 500 Data size: 99000 Basic stats: COMPLETE Column stats: NONE
                   TopN Hash Memory Usage: 0.1
                   value expressions: _col1 (type: bigint), _col2 (type: bigint)
@@ -347,8 +347,8 @@ STAGE PLANS:
               Reduce Sink Vectorization:
                   className: VectorReduceSinkOperator
                   native: false
-                  nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                  nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false, No TopN IS false
+                  nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                  nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false
               Statistics: Num rows: 250 Data size: 49500 Basic stats: COMPLETE Column stats: NONE
               TopN Hash Memory Usage: 0.1
               value expressions: _col1 (type: bigint), _col2 (type: bigint)

http://git-wip-us.apache.org/repos/asf/hive/blob/ec8c390e/ql/src/test/results/clientpositive/vector_char_mapjoin1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_char_mapjoin1.q.out b/ql/src/test/results/clientpositive/vector_char_mapjoin1.q.out
index d1bdadb..3b022d9 100644
--- a/ql/src/test/results/clientpositive/vector_char_mapjoin1.q.out
+++ b/ql/src/test/results/clientpositive/vector_char_mapjoin1.q.out
@@ -204,7 +204,7 @@ STAGE PLANS:
                     Reduce Sink Vectorization:
                         className: VectorReduceSinkOperator
                         native: false
-                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false
                     Statistics: Num rows: 3 Data size: 323 Basic stats: COMPLETE Column stats: NONE
                     value expressions: _col1 (type: char(10)), _col2 (type: int), _col3 (type: char(10))
@@ -335,7 +335,7 @@ STAGE PLANS:
                     Reduce Sink Vectorization:
                         className: VectorReduceSinkOperator
                         native: false
-                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false
                     Statistics: Num rows: 3 Data size: 323 Basic stats: COMPLETE Column stats: NONE
                     value expressions: _col1 (type: char(10)), _col2 (type: int), _col3 (type: char(20))
@@ -468,7 +468,7 @@ STAGE PLANS:
                     Reduce Sink Vectorization:
                         className: VectorReduceSinkOperator
                         native: false
-                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false
                     Statistics: Num rows: 3 Data size: 323 Basic stats: COMPLETE Column stats: NONE
                     value expressions: _col1 (type: char(10)), _col2 (type: int), _col3 (type: string)

http://git-wip-us.apache.org/repos/asf/hive/blob/ec8c390e/ql/src/test/results/clientpositive/vector_char_simple.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_char_simple.q.out b/ql/src/test/results/clientpositive/vector_char_simple.q.out
index c7b00fd..72ea17b 100644
--- a/ql/src/test/results/clientpositive/vector_char_simple.q.out
+++ b/ql/src/test/results/clientpositive/vector_char_simple.q.out
@@ -228,8 +228,8 @@ STAGE PLANS:
                   Reduce Sink Vectorization:
                       className: VectorReduceSinkOperator
                       native: false
-                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                      nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false, No TopN IS false
+                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                      nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false
       Execution mode: vectorized
       Map Vectorization:
           enabled: true

http://git-wip-us.apache.org/repos/asf/hive/blob/ec8c390e/ql/src/test/results/clientpositive/vector_coalesce.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_coalesce.q.out b/ql/src/test/results/clientpositive/vector_coalesce.q.out
index f158236..87ab937 100644
--- a/ql/src/test/results/clientpositive/vector_coalesce.q.out
+++ b/ql/src/test/results/clientpositive/vector_coalesce.q.out
@@ -37,8 +37,8 @@ STAGE PLANS:
                   Reduce Sink Vectorization:
                       className: VectorReduceSinkOperator
                       native: false
-                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                      nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false, No TopN IS false
+                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                      nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false
       Execution mode: vectorized
       Map Vectorization:
           enabled: true
@@ -122,8 +122,8 @@ STAGE PLANS:
                   Reduce Sink Vectorization:
                       className: VectorReduceSinkOperator
                       native: false
-                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                      nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false, No TopN IS false
+                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                      nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false
       Execution mode: vectorized
       Map Vectorization:
           enabled: true
@@ -206,8 +206,8 @@ STAGE PLANS:
                   Reduce Sink Vectorization:
                       className: VectorReduceSinkOperator
                       native: false
-                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                      nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false, No TopN IS false
+                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                      nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false
       Execution mode: vectorized
       Map Vectorization:
           enabled: true
@@ -291,8 +291,8 @@ STAGE PLANS:
                   Reduce Sink Vectorization:
                       className: VectorReduceSinkOperator
                       native: false
-                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                      nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false, No TopN IS false
+                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                      nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false
       Execution mode: vectorized
       Map Vectorization:
           enabled: true
@@ -375,8 +375,8 @@ STAGE PLANS:
                   Reduce Sink Vectorization:
                       className: VectorReduceSinkOperator
                       native: false
-                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                      nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false, No TopN IS false
+                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                      nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false
       Execution mode: vectorized
       Map Vectorization:
           enabled: true

http://git-wip-us.apache.org/repos/asf/hive/blob/ec8c390e/ql/src/test/results/clientpositive/vector_coalesce_2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_coalesce_2.q.out b/ql/src/test/results/clientpositive/vector_coalesce_2.q.out
index 6778499..431cfdc 100644
--- a/ql/src/test/results/clientpositive/vector_coalesce_2.q.out
+++ b/ql/src/test/results/clientpositive/vector_coalesce_2.q.out
@@ -220,7 +220,7 @@ STAGE PLANS:
                   Reduce Sink Vectorization:
                       className: VectorReduceSinkOperator
                       native: false
-                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                       nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false
                   Statistics: Num rows: 4 Data size: 510 Basic stats: COMPLETE Column stats: NONE
                   value expressions: _col1 (type: bigint)

http://git-wip-us.apache.org/repos/asf/hive/blob/ec8c390e/ql/src/test/results/clientpositive/vector_count.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_count.q.out b/ql/src/test/results/clientpositive/vector_count.q.out
index 3473759..ff6993e 100644
--- a/ql/src/test/results/clientpositive/vector_count.q.out
+++ b/ql/src/test/results/clientpositive/vector_count.q.out
@@ -93,7 +93,7 @@ STAGE PLANS:
                   Reduce Sink Vectorization:
                       className: VectorReduceSinkOperator
                       native: false
-                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No TopN IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No PTF TopN IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                       nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false, No DISTINCT columns IS false
                   Statistics: Num rows: 7 Data size: 100 Basic stats: COMPLETE Column stats: NONE
                   value expressions: _col5 (type: bigint)
@@ -196,7 +196,7 @@ STAGE PLANS:
                   Reduce Sink Vectorization:
                       className: VectorReduceSinkOperator
                       native: false
-                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No TopN IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No PTF TopN IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                       nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false, No DISTINCT columns IS false
                   Statistics: Num rows: 7 Data size: 100 Basic stats: COMPLETE Column stats: NONE
                   value expressions: _col4 (type: bigint), _col5 (type: bigint), _col6 (type: bigint), _col7 (type: bigint), _col8 (type: bigint), _col9 (type: bigint)
@@ -283,7 +283,7 @@ STAGE PLANS:
                 Reduce Sink Vectorization:
                     className: VectorReduceSinkOperator
                     native: false
-                    nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No TopN IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                    nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No PTF TopN IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                     nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false, No DISTINCT columns IS false
                 Statistics: Num rows: 7 Data size: 100 Basic stats: COMPLETE Column stats: NONE
                 value expressions: d (type: int)
@@ -373,7 +373,7 @@ STAGE PLANS:
                 Reduce Sink Vectorization:
                     className: VectorReduceSinkOperator
                     native: false
-                    nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No TopN IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                    nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No PTF TopN IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                     nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false, No DISTINCT columns IS false
                 Statistics: Num rows: 7 Data size: 100 Basic stats: COMPLETE Column stats: NONE
       Execution mode: vectorized

http://git-wip-us.apache.org/repos/asf/hive/blob/ec8c390e/ql/src/test/results/clientpositive/vector_data_types.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_data_types.q.out b/ql/src/test/results/clientpositive/vector_data_types.q.out
index b0bdeb1..f6d20ae 100644
--- a/ql/src/test/results/clientpositive/vector_data_types.q.out
+++ b/ql/src/test/results/clientpositive/vector_data_types.q.out
@@ -221,8 +221,8 @@ STAGE PLANS:
                 Reduce Sink Vectorization:
                     className: VectorReduceSinkOperator
                     native: false
-                    nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                    nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false, No TopN IS false
+                    nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                    nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false
                 Statistics: Num rows: 1049 Data size: 311170 Basic stats: COMPLETE Column stats: NONE
                 TopN Hash Memory Usage: 0.1
                 value expressions: _col3 (type: bigint), _col4 (type: float), _col5 (type: double), _col6 (type: boolean), _col7 (type: string), _col8 (type: timestamp), _col9 (type: decimal(4,2)), _col10 (type: binary)


[34/50] [abbrv] hive git commit: HIVE-16450: Some metastore operations are not retried even with desired underlying exceptions (Aihua Xu, reviewed by Naveen Gangam & Peter Vary)

Posted by we...@apache.org.
HIVE-16450: Some metastore operations are not retried even with desired underlying exceptions (Aihua Xu, reviewed by Naveen Gangam & Peter Vary)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/301e7c5e
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/301e7c5e
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/301e7c5e

Branch: refs/heads/hive-14535
Commit: 301e7c5eaba790687818a57d92b046f746bb3d76
Parents: 54dbca6
Author: Aihua Xu <ai...@apache.org>
Authored: Fri Apr 14 10:53:58 2017 -0400
Committer: Aihua Xu <ai...@apache.org>
Committed: Mon May 8 10:20:47 2017 -0400

----------------------------------------------------------------------
 .../apache/hadoop/hive/metastore/Deadline.java  | 29 ++++++-----------
 .../hadoop/hive/metastore/MetaStoreUtils.java   | 24 ++++++++++++++
 .../hadoop/hive/metastore/ObjectStore.java      | 34 ++++++++------------
 .../hive/metastore/RetryingHMSHandler.java      |  2 --
 4 files changed, 47 insertions(+), 42 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/301e7c5e/metastore/src/java/org/apache/hadoop/hive/metastore/Deadline.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/Deadline.java b/metastore/src/java/org/apache/hadoop/hive/metastore/Deadline.java
index 71d336a..6149224 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/Deadline.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/Deadline.java
@@ -86,15 +86,15 @@ public class Deadline {
    */
   public static void resetTimeout(long timeoutMs) throws MetaException {
     if (timeoutMs <= 0) {
-      throw newMetaException(new DeadlineException("The reset timeout value should be " +
+      throw MetaStoreUtils.newMetaException(new DeadlineException("The reset timeout value should be " +
           "larger than 0: " + timeoutMs));
     }
     Deadline deadline = getCurrentDeadline();
     if (deadline != null) {
       deadline.timeoutNanos = timeoutMs * 1000000L;
     } else {
-      throw newMetaException(new DeadlineException("The threadlocal Deadline is null," +
-          " please register it firstly."));
+      throw MetaStoreUtils.newMetaException(new DeadlineException("The threadlocal Deadline is null," +
+          " please register it first."));
     }
   }
 
@@ -105,8 +105,8 @@ public class Deadline {
   public static boolean startTimer(String method) throws MetaException {
     Deadline deadline = getCurrentDeadline();
     if (deadline == null) {
-      throw newMetaException(new DeadlineException("The threadlocal Deadline is null," +
-          " please register it firstly."));
+      throw MetaStoreUtils.newMetaException(new DeadlineException("The threadlocal Deadline is null," +
+          " please register it first."));
     }
     if (deadline.startTime != NO_DEADLINE) return false;
     deadline.method = method;
@@ -125,8 +125,8 @@ public class Deadline {
       deadline.startTime = NO_DEADLINE;
       deadline.method = null;
     } else {
-      throw newMetaException(new DeadlineException("The threadlocal Deadline is null," +
-          " please register it firstly."));
+      throw MetaStoreUtils.newMetaException(new DeadlineException("The threadlocal Deadline is null," +
+          " please register it first."));
     }
   }
 
@@ -146,7 +146,7 @@ public class Deadline {
     if (deadline != null) {
       deadline.check();
     } else {
-      throw newMetaException(new DeadlineException("The threadlocal Deadline is null," +
+      throw MetaStoreUtils.newMetaException(new DeadlineException("The threadlocal Deadline is null," +
           " please register it first."));
     }
   }
@@ -165,18 +165,7 @@ public class Deadline {
             + (elapsedTime / 1000000L) + "ms exceeds " + (timeoutNanos / 1000000L)  + "ms");
       }
     } catch (DeadlineException e) {
-      throw newMetaException(e);
+      throw MetaStoreUtils.newMetaException(e);
     }
   }
-
-  /**
-   * convert DeadlineException to MetaException
-   * @param e
-   * @return
-   */
-  private static MetaException newMetaException(DeadlineException e) {
-    MetaException metaException = new MetaException(e.getMessage());
-    metaException.initCause(e);
-    return metaException;
-  }
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/301e7c5e/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java b/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java
index d67e03f..870896c 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java
@@ -1929,4 +1929,28 @@ public class MetaStoreUtils {
     }
     csNew.setStatsObj(list);
   }
+
+  /**
+   * convert Exception to MetaException, which sets the cause to such exception
+   * @param e cause of the exception
+   * @return  the MetaException with the specified exception as the cause
+   */
+  public static MetaException newMetaException(Exception e) {
+    return newMetaException(e != null ? e.getMessage() : null, e);
+  }
+
+  /**
+   * convert Exception to MetaException, which sets the cause to such exception
+   * @param errorMessage  the error message for this MetaException
+   * @param e             cause of the exception
+   * @return  the MetaException with the specified exception as the cause
+   */
+  public static MetaException newMetaException(String errorMessage, Exception e) {
+    MetaException metaException = new MetaException(errorMessage);
+    if (e != null) {
+      metaException.initCause(e);
+    }
+    return metaException;
+  }
+
 }
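
The two overloads added above are the core of the patch: keep the original exception attached as the cause so callers further up (notably the retry layer) can inspect it. A minimal usage sketch, assuming a hypothetical caller and a simulated failing call:

import org.apache.hadoop.hive.metastore.MetaStoreUtils;
import org.apache.hadoop.hive.metastore.api.MetaException;

// Illustrative only: wrap an arbitrary failure so the original exception survives as the cause.
public class NewMetaExceptionExample {

  static void doMetastoreWork() throws MetaException {
    try {
      riskyCall();
    } catch (Exception ex) {
      throw MetaStoreUtils.newMetaException(ex);               // message taken from the cause
    }
  }

  static void doMetastoreWorkWithMessage() throws MetaException {
    try {
      riskyCall();
    } catch (Exception ex) {
      throw MetaStoreUtils.newMetaException(
          "The transaction for alter partition did not commit successfully.", ex);
    }
  }

  private static void riskyCall() throws Exception {
    throw new Exception("simulated underlying failure");       // stand-in for a JDO/datastore error
  }
}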

http://git-wip-us.apache.org/repos/asf/hive/blob/301e7c5e/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java b/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java
index 29aa642..a83e12e 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java
@@ -2737,7 +2737,7 @@ public class ObjectStore implements RawStore, Configurable {
         throw ex;
       } catch (Exception ex) {
         LOG.error("", ex);
-        throw new MetaException(ex.getMessage());
+        throw MetaStoreUtils.newMetaException(ex);
       } finally {
         close();
       }
@@ -2767,7 +2767,7 @@ public class ObjectStore implements RawStore, Configurable {
         if (ex instanceof MetaException) {
           throw (MetaException)ex;
         }
-        throw new MetaException(ex.getMessage());
+        throw MetaStoreUtils.newMetaException(ex);
       }
       if (!isInTxn) {
         JDOException rollbackEx = null;
@@ -3302,12 +3302,8 @@ public class ObjectStore implements RawStore, Configurable {
     } finally {
       if (!success) {
         rollbackTransaction();
-        MetaException metaException = new MetaException(
-            "The transaction for alter partition did not commit successfully.");
-        if (e != null) {
-          metaException.initCause(e);
-        }
-        throw metaException;
+        throw MetaStoreUtils.newMetaException(
+            "The transaction for alter partition did not commit successfully.", e);
       }
     }
   }
@@ -3331,12 +3327,8 @@ public class ObjectStore implements RawStore, Configurable {
     } finally {
       if (!success) {
         rollbackTransaction();
-        MetaException metaException = new MetaException(
-            "The transaction for alter partition did not commit successfully.");
-        if (e != null) {
-          metaException.initCause(e);
-        }
-        throw metaException;
+        throw MetaStoreUtils.newMetaException(
+            "The transaction for alter partition did not commit successfully.", e);
       }
     }
   }
@@ -6782,8 +6774,10 @@ public class ObjectStore implements RawStore, Configurable {
     try {
       List<MTableColumnStatistics> stats = getMTableColumnStatistics(table,
           colNames, queryWrapper);
-      for(MTableColumnStatistics cStat : stats) {
-        statsMap.put(cStat.getColName(), cStat);
+      if (stats != null) {
+        for(MTableColumnStatistics cStat : stats) {
+          statsMap.put(cStat.getColName(), cStat);
+        }
       }
     } finally {
       queryWrapper.close();
@@ -6946,7 +6940,7 @@ public class ObjectStore implements RawStore, Configurable {
       if (ex instanceof MetaException) {
         throw (MetaException) ex;
       }
-      throw new MetaException(ex.getMessage());
+      throw MetaStoreUtils.newMetaException(ex);
     } finally {
       if (!committed) {
         rollbackTransaction();
@@ -6994,7 +6988,7 @@ public class ObjectStore implements RawStore, Configurable {
 
         try {
         List<MTableColumnStatistics> mStats = getMTableColumnStatistics(getTable(), colNames, queryWrapper);
-        if (mStats.isEmpty()) return null;
+        if (mStats == null || mStats.isEmpty()) return null;
         // LastAnalyzed is stored per column, but thrift object has it per multiple columns.
         // Luckily, nobody actually uses it, so we will set to lowest value of all columns for now.
         ColumnStatisticsDesc desc = StatObjectConverter.getTableColumnStatisticsDesc(mStats.get(0));
@@ -7184,7 +7178,7 @@ public class ObjectStore implements RawStore, Configurable {
       if (ex instanceof MetaException) {
         throw (MetaException) ex;
       }
-      throw new MetaException(ex.getMessage());
+      throw MetaStoreUtils.newMetaException(ex);
     } finally {
       if (!committed) {
         rollbackTransaction();
@@ -7662,7 +7656,7 @@ public class ObjectStore implements RawStore, Configurable {
           throw new MetaException("Version table not found. " + "The metastore is not upgraded to "
               + MetaStoreSchemaInfo.getHiveSchemaVersion());
         } else {
-          throw e;
+          throw MetaStoreUtils.newMetaException(e);
         }
       }
       committed = commitTransaction();

http://git-wip-us.apache.org/repos/asf/hive/blob/301e7c5e/metastore/src/java/org/apache/hadoop/hive/metastore/RetryingHMSHandler.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/RetryingHMSHandler.java b/metastore/src/java/org/apache/hadoop/hive/metastore/RetryingHMSHandler.java
index f19ff6c..b1c8e39 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/RetryingHMSHandler.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/RetryingHMSHandler.java
@@ -153,8 +153,6 @@ public class RetryingHMSHandler implements InvocationHandler {
         }
         return new Result(object, retryCount);
 
-      } catch (javax.jdo.JDOException e) {
-        caughtException = e;
       } catch (UndeclaredThrowableException e) {
         if (e.getCause() != null) {
           if (e.getCause() instanceof javax.jdo.JDOException) {
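
With ObjectStore now attaching the original exception as the cause of every MetaException it throws, the retry handler no longer needs a dedicated catch for javax.jdo.JDOException: the wrapped JDOException remains reachable through the cause chain, which is what the surviving UndeclaredThrowableException branch above inspects. A simplified, hypothetical sketch of that kind of retryability check (not the actual RetryingHMSHandler logic):

import javax.jdo.JDOException;

// Hypothetical sketch: walk the cause chain to decide whether a failure is retryable.
// This only works when wrappers such as MetaException keep the original exception
// attached via initCause(), which is what MetaStoreUtils.newMetaException does.
public class RetryDecisionSketch {
  static boolean isRetryable(Throwable t) {
    for (Throwable cur = t; cur != null; cur = cur.getCause()) {
      if (cur instanceof JDOException) {
        return true;   // transient JDO/datastore failure: worth another attempt
      }
    }
    return false;
  }
}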


[19/50] [abbrv] hive git commit: HIVE-16268 : enable incremental repl dump to handle functions metadata (Anishek Agarwal, reviewed by Sushanth Sowmyan)

Posted by we...@apache.org.
HIVE-16268 : enable incremental repl dump to handle functions metadata (Anishek Agarwal, reviewed by Sushanth Sowmyan)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/9d4f13af
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/9d4f13af
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/9d4f13af

Branch: refs/heads/hive-14535
Commit: 9d4f13afda34250b7cf722287a557426e90ff24d
Parents: 699d6ce
Author: Sushanth Sowmyan <kh...@gmail.com>
Authored: Thu May 4 02:48:27 2017 -0700
Committer: Sushanth Sowmyan <kh...@gmail.com>
Committed: Sun May 7 15:43:21 2017 -0700

----------------------------------------------------------------------
 .../ql/parse/ReplicationSemanticAnalyzer.java   | 306 ++-----------------
 .../hadoop/hive/ql/parse/repl/DumpType.java     |   1 +
 .../parse/repl/dump/events/AbstractHandler.java |  46 +++
 .../repl/dump/events/AddPartitionHandler.java   | 114 +++++++
 .../repl/dump/events/AlterPartitionHandler.java | 112 +++++++
 .../repl/dump/events/AlterTableHandler.java     | 102 +++++++
 .../repl/dump/events/CreateFunctionHandler.java |  36 +++
 .../repl/dump/events/CreateTableHandler.java    |  86 ++++++
 .../parse/repl/dump/events/DefaultHandler.java  |  44 +++
 .../repl/dump/events/DropPartitionHandler.java  |  44 +++
 .../repl/dump/events/DropTableHandler.java      |  44 +++
 .../ql/parse/repl/dump/events/EventHandler.java |  62 ++++
 .../repl/dump/events/EventHandlerFactory.java   |  76 +++++
 .../parse/repl/dump/events/InsertHandler.java   | 110 +++++++
 .../ql/parse/repl/events/AbstractHandler.java   |  46 ---
 .../parse/repl/events/AddPartitionHandler.java  | 114 -------
 .../repl/events/AlterPartitionHandler.java      | 112 -------
 .../ql/parse/repl/events/AlterTableHandler.java | 102 -------
 .../parse/repl/events/CreateTableHandler.java   |  86 ------
 .../ql/parse/repl/events/DefaultHandler.java    |  44 ---
 .../parse/repl/events/DropPartitionHandler.java |  44 ---
 .../ql/parse/repl/events/DropTableHandler.java  |  44 ---
 .../hive/ql/parse/repl/events/EventHandler.java |  62 ----
 .../parse/repl/events/EventHandlerFactory.java  |  75 -----
 .../ql/parse/repl/events/InsertHandler.java     | 110 -------
 .../load/message/AbstractMessageHandler.java    |  67 ++++
 .../parse/repl/load/message/DefaultHandler.java |  33 ++
 .../repl/load/message/DropPartitionHandler.java | 108 +++++++
 .../repl/load/message/DropTableHandler.java     |  51 ++++
 .../parse/repl/load/message/InsertHandler.java  |  47 +++
 .../parse/repl/load/message/MessageHandler.java |  91 ++++++
 .../load/message/MessageHandlerFactory.java     |  79 +++++
 .../load/message/RenamePartitionHandler.java    |  74 +++++
 .../repl/load/message/RenameTableHandler.java   |  81 +++++
 .../parse/repl/load/message/TableHandler.java   |  68 +++++
 .../load/message/TruncatePartitionHandler.java  |  69 +++++
 .../repl/load/message/TruncateTableHandler.java |  50 +++
 .../dump/events/TestEventHandlerFactory.java    |  62 ++++
 .../repl/events/TestEventHandlerFactory.java    |  62 ----
 39 files changed, 1781 insertions(+), 1183 deletions(-)
----------------------------------------------------------------------
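
The bulk of this commit replaces the single large switch in ReplicationSemanticAnalyzer with small per-event handler classes, resolved through EventHandlerFactory on the dump side and MessageHandlerFactory on the load side. A greatly simplified, hypothetical sketch of that dispatch idea (the registry and handler bodies below are illustrative, not the patch's classes):

import java.util.HashMap;
import java.util.Map;

// Hypothetical sketch of factory dispatch: each dump/event type resolves to a dedicated
// handler object instead of being one more case inside a single large switch statement.
public class HandlerFactorySketch {
  interface Handler {
    void handle(String payload) throws Exception;
  }

  private static final Map<String, Handler> REGISTRY = new HashMap<>();
  static {
    REGISTRY.put("EVENT_DROP_TABLE", payload -> System.out.println("plan DROP TABLE for " + payload));
    REGISTRY.put("EVENT_CREATE_FUNCTION", payload -> System.out.println("plan CREATE FUNCTION for " + payload));
  }

  static Handler handlerFor(String dumpType) {
    // unknown types fall back to a no-op-style default, much like EVENT_UNKNOWN below
    return REGISTRY.getOrDefault(dumpType, payload -> System.out.println("unknown event, skipping"));
  }

  public static void main(String[] args) throws Exception {
    handlerFor("EVENT_DROP_TABLE").handle("default.some_table");
  }
}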


http://git-wip-us.apache.org/repos/asf/hive/blob/9d4f13af/ql/src/java/org/apache/hadoop/hive/ql/parse/ReplicationSemanticAnalyzer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/ReplicationSemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/ReplicationSemanticAnalyzer.java
index 2daa123..5d1d2fd 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/ReplicationSemanticAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/ReplicationSemanticAnalyzer.java
@@ -27,22 +27,15 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.metastore.IMetaStoreClient;
 import org.apache.hadoop.hive.metastore.api.Database;
-import org.apache.hadoop.hive.metastore.api.FieldSchema;
 import org.apache.hadoop.hive.metastore.api.Function;
 import org.apache.hadoop.hive.metastore.api.InvalidOperationException;
 import org.apache.hadoop.hive.metastore.api.NotificationEvent;
+import org.apache.hadoop.hive.metastore.messaging.EventUtils;
+import org.apache.hadoop.hive.metastore.messaging.MessageFactory;
 import org.apache.hadoop.hive.metastore.messaging.event.filters.AndFilter;
 import org.apache.hadoop.hive.metastore.messaging.event.filters.DatabaseAndTableFilter;
 import org.apache.hadoop.hive.metastore.messaging.event.filters.EventBoundaryFilter;
 import org.apache.hadoop.hive.metastore.messaging.event.filters.MessageFormatFilter;
-import org.apache.hadoop.hive.metastore.messaging.AlterPartitionMessage;
-import org.apache.hadoop.hive.metastore.messaging.AlterTableMessage;
-import org.apache.hadoop.hive.metastore.messaging.DropPartitionMessage;
-import org.apache.hadoop.hive.metastore.messaging.DropTableMessage;
-import org.apache.hadoop.hive.metastore.messaging.EventUtils;
-import org.apache.hadoop.hive.metastore.messaging.InsertMessage;
-import org.apache.hadoop.hive.metastore.messaging.MessageDeserializer;
-import org.apache.hadoop.hive.metastore.messaging.MessageFactory;
 import org.apache.hadoop.hive.ql.ErrorMsg;
 import org.apache.hadoop.hive.ql.QueryState;
 import org.apache.hadoop.hive.ql.exec.Task;
@@ -53,29 +46,23 @@ import org.apache.hadoop.hive.ql.metadata.HiveException;
 import org.apache.hadoop.hive.ql.metadata.Table;
 import org.apache.hadoop.hive.ql.parse.repl.DumpType;
 import org.apache.hadoop.hive.ql.parse.repl.dump.HiveWrapper;
+import org.apache.hadoop.hive.ql.parse.repl.dump.events.EventHandler;
+import org.apache.hadoop.hive.ql.parse.repl.dump.events.EventHandlerFactory;
 import org.apache.hadoop.hive.ql.parse.repl.load.DumpMetaData;
 import org.apache.hadoop.hive.ql.parse.repl.dump.io.FunctionSerializer;
 import org.apache.hadoop.hive.ql.parse.repl.dump.io.JsonWriter;
 import org.apache.hadoop.hive.ql.parse.repl.dump.Utils;
-import org.apache.hadoop.hive.ql.parse.repl.events.EventHandler;
-import org.apache.hadoop.hive.ql.parse.repl.events.EventHandlerFactory;
 import org.apache.hadoop.hive.ql.parse.repl.load.MetaData;
+import org.apache.hadoop.hive.ql.parse.repl.load.message.MessageHandler;
+import org.apache.hadoop.hive.ql.parse.repl.load.message.MessageHandlerFactory;
 import org.apache.hadoop.hive.ql.plan.AlterDatabaseDesc;
 import org.apache.hadoop.hive.ql.plan.AlterTableDesc;
 import org.apache.hadoop.hive.ql.plan.CreateDatabaseDesc;
 import org.apache.hadoop.hive.ql.plan.CreateFunctionDesc;
 import org.apache.hadoop.hive.ql.plan.DDLWork;
 import org.apache.hadoop.hive.ql.plan.DependencyCollectionWork;
-import org.apache.hadoop.hive.ql.plan.DropTableDesc;
-import org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc;
-import org.apache.hadoop.hive.ql.plan.ExprNodeConstantDesc;
-import org.apache.hadoop.hive.ql.plan.ExprNodeGenericFuncDesc;
 import org.apache.hadoop.hive.ql.plan.FunctionWork;
 import org.apache.hadoop.hive.ql.plan.PlanUtils;
-import org.apache.hadoop.hive.ql.plan.RenamePartitionDesc;
-import org.apache.hadoop.hive.ql.plan.TruncateTableDesc;
-import org.apache.hadoop.hive.serde2.typeinfo.PrimitiveTypeInfo;
-import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -87,7 +74,6 @@ import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collections;
 import java.util.HashMap;
-import java.util.Iterator;
 import java.util.LinkedHashMap;
 import java.util.List;
 import java.util.Map;
@@ -681,270 +667,26 @@ public class ReplicationSemanticAnalyzer extends BaseSemanticAnalyzer {
   }
 
   private List<Task<? extends Serializable>> analyzeEventLoad(
-      String dbName, String tblName, String locn,
-      Task<? extends Serializable> precursor,
-      Map<String, Long> dbsUpdated, Map<String, Long> tablesUpdated,
-      DumpMetaData dmd) throws SemanticException {
-    MessageDeserializer md = MessageFactory.getInstance().getDeserializer();
-    switch (dmd.getDumpType()) {
-      case EVENT_CREATE_TABLE: {
-        return analyzeTableLoad(dbName, tblName, locn, precursor, dbsUpdated, tablesUpdated);
-      }
-      case EVENT_ADD_PARTITION: {
-        return analyzeTableLoad(dbName, tblName, locn, precursor, dbsUpdated, tablesUpdated);
-      }
-      case EVENT_DROP_TABLE: {
-        DropTableMessage dropTableMessage = md.getDropTableMessage(dmd.getPayload());
-        String actualDbName = ((dbName == null) || dbName.isEmpty() ? dropTableMessage.getDB() : dbName);
-        String actualTblName = ((tblName == null) || tblName.isEmpty() ? dropTableMessage.getTable() : tblName);
-        DropTableDesc dropTableDesc = new DropTableDesc(
-            actualDbName + "." + actualTblName,
-            null, true, true,
-            getNewEventOnlyReplicationSpec(dmd.getEventFrom()));
-        Task<DDLWork> dropTableTask = TaskFactory.get(new DDLWork(inputs, outputs, dropTableDesc), conf);
-        if (precursor != null){
-          precursor.addDependentTask(dropTableTask);
-        }
-        List<Task<? extends Serializable>> tasks = new ArrayList<Task<? extends Serializable>>();
-        tasks.add(dropTableTask);
-        LOG.debug("Added drop tbl task : {}:{}", dropTableTask.getId(), dropTableDesc.getTableName());
-        dbsUpdated.put(actualDbName,dmd.getEventTo());
-        return tasks;
-      }
-      case EVENT_DROP_PARTITION: {
-        try {
-          DropPartitionMessage dropPartitionMessage = md.getDropPartitionMessage(dmd.getPayload());
-          String actualDbName = ((dbName == null) || dbName.isEmpty() ? dropPartitionMessage.getDB() : dbName);
-          String actualTblName = ((tblName == null) || tblName.isEmpty() ? dropPartitionMessage.getTable() : tblName);
-          Map<Integer, List<ExprNodeGenericFuncDesc>> partSpecs;
-          partSpecs =
-              genPartSpecs(new Table(dropPartitionMessage.getTableObj()),
-                  dropPartitionMessage.getPartitions());
-          if (partSpecs.size() > 0) {
-            DropTableDesc dropPtnDesc = new DropTableDesc(
-                actualDbName + "." + actualTblName,
-                partSpecs, null, true,
-                getNewEventOnlyReplicationSpec(dmd.getEventFrom()));
-            Task<DDLWork> dropPtnTask =
-                TaskFactory.get(new DDLWork(inputs, outputs, dropPtnDesc), conf);
-            if (precursor != null) {
-              precursor.addDependentTask(dropPtnTask);
-            }
-            List<Task<? extends Serializable>> tasks = new ArrayList<Task<? extends Serializable>>();
-            tasks.add(dropPtnTask);
-            LOG.debug("Added drop ptn task : {}:{},{}", dropPtnTask.getId(),
-                dropPtnDesc.getTableName(), dropPartitionMessage.getPartitions());
-            dbsUpdated.put(actualDbName, dmd.getEventTo());
-            tablesUpdated.put(actualDbName + "." + actualTblName, dmd.getEventTo());
-            return tasks;
-          } else {
-            throw new SemanticException(
-                "DROP PARTITION EVENT does not return any part descs for event message :"
-                    + dmd.getPayload());
-          }
-        } catch (Exception e) {
-          if (!(e instanceof SemanticException)){
-            throw new SemanticException("Error reading message members", e);
-          } else {
-            throw (SemanticException)e;
-          }
-        }
-      }
-      case EVENT_ALTER_TABLE: {
-        return analyzeTableLoad(dbName, tblName, locn, precursor, dbsUpdated, tablesUpdated);
-      }
-      case EVENT_RENAME_TABLE: {
-        AlterTableMessage renameTableMessage = md.getAlterTableMessage(dmd.getPayload());
-        if ((tblName != null) && (!tblName.isEmpty())){
-          throw new SemanticException("RENAMES of tables are not supported for table-level replication");
-        }
-        try {
-          String oldDbName = renameTableMessage.getTableObjBefore().getDbName();
-          String newDbName = renameTableMessage.getTableObjAfter().getDbName();
-
-          if ((dbName != null) && (!dbName.isEmpty())){
-            // If we're loading into a db, instead of into the warehouse, then the oldDbName and
-            // newDbName must be the same
-            if (!oldDbName.equalsIgnoreCase(newDbName)){
-              throw new SemanticException("Cannot replicate an event renaming a table across"
-                  + " databases into a db level load " + oldDbName +"->" + newDbName);
-            } else {
-              // both were the same, and can be replaced by the new db we're loading into.
-              oldDbName = dbName;
-              newDbName = dbName;
-            }
-          }
-
-          String oldName = oldDbName + "." + renameTableMessage.getTableObjBefore().getTableName();
-          String newName = newDbName + "." + renameTableMessage.getTableObjAfter().getTableName();
-          AlterTableDesc renameTableDesc = new AlterTableDesc(oldName, newName, false);
-          Task<DDLWork> renameTableTask = TaskFactory.get(new DDLWork(inputs, outputs, renameTableDesc), conf);
-          if (precursor != null){
-            precursor.addDependentTask(renameTableTask);
-          }
-          List<Task<? extends Serializable>> tasks = new ArrayList<Task<? extends Serializable>>();
-          tasks.add(renameTableTask);
-          LOG.debug("Added rename table task : {}:{}->{}", renameTableTask.getId(), oldName, newName);
-          dbsUpdated.put(newDbName, dmd.getEventTo()); // oldDbName and newDbName *will* be the same if we're here
-          tablesUpdated.remove(oldName);
-          tablesUpdated.put(newName, dmd.getEventTo());
-          // Note : edge-case here in interaction with table-level REPL LOAD, where that nukes out tablesUpdated
-          // However, we explicitly don't support repl of that sort, and error out above if so. If that should
-          // ever change, this will need reworking.
-          return tasks;
-        } catch (Exception e) {
-          if (!(e instanceof SemanticException)){
-            throw new SemanticException("Error reading message members", e);
-          } else {
-            throw (SemanticException)e;
-          }
-        }
-      }
-      case EVENT_TRUNCATE_TABLE: {
-        AlterTableMessage truncateTableMessage = md.getAlterTableMessage(dmd.getPayload());
-        String actualDbName = ((dbName == null) || dbName.isEmpty() ? truncateTableMessage.getDB() : dbName);
-        String actualTblName = ((tblName == null) || tblName.isEmpty() ? truncateTableMessage.getTable() : tblName);
-
-        TruncateTableDesc truncateTableDesc = new TruncateTableDesc(
-                actualDbName + "." + actualTblName, null);
-        Task<DDLWork> truncateTableTask = TaskFactory.get(new DDLWork(inputs, outputs, truncateTableDesc), conf);
-        if (precursor != null) {
-          precursor.addDependentTask(truncateTableTask);
-        }
-
-        List<Task<? extends Serializable>> tasks = new ArrayList<Task<? extends Serializable>>();
-        tasks.add(truncateTableTask);
-        LOG.debug("Added truncate tbl task : {}:{}", truncateTableTask.getId(), truncateTableDesc.getTableName());
-        dbsUpdated.put(actualDbName,dmd.getEventTo());
-        return tasks;
-      }
-      case EVENT_ALTER_PARTITION: {
-        return analyzeTableLoad(dbName, tblName, locn, precursor, dbsUpdated, tablesUpdated);
-      }
-      case EVENT_RENAME_PARTITION: {
-        AlterPartitionMessage renamePtnMessage = md.getAlterPartitionMessage(dmd.getPayload());
-        String actualDbName = ((dbName == null) || dbName.isEmpty() ? renamePtnMessage.getDB() : dbName);
-        String actualTblName = ((tblName == null) || tblName.isEmpty() ? renamePtnMessage.getTable() : tblName);
-
-        Map<String, String> newPartSpec = new LinkedHashMap<String,String>();
-        Map<String, String> oldPartSpec = new LinkedHashMap<String,String>();
-        String tableName = actualDbName + "." + actualTblName;
-        try {
-          org.apache.hadoop.hive.metastore.api.Table tblObj = renamePtnMessage.getTableObj();
-          org.apache.hadoop.hive.metastore.api.Partition pobjBefore = renamePtnMessage.getPtnObjBefore();
-          org.apache.hadoop.hive.metastore.api.Partition pobjAfter = renamePtnMessage.getPtnObjAfter();
-          Iterator<String> beforeValIter = pobjBefore.getValuesIterator();
-          Iterator<String> afterValIter = pobjAfter.getValuesIterator();
-          for (FieldSchema fs : tblObj.getPartitionKeys()){
-            oldPartSpec.put(fs.getName(), beforeValIter.next());
-            newPartSpec.put(fs.getName(), afterValIter.next());
-          }
-        } catch (Exception e) {
-          if (!(e instanceof SemanticException)){
-            throw new SemanticException("Error reading message members", e);
-          } else {
-            throw (SemanticException)e;
-          }
-        }
-
-        RenamePartitionDesc renamePtnDesc = new RenamePartitionDesc(tableName, oldPartSpec, newPartSpec);
-        Task<DDLWork> renamePtnTask = TaskFactory.get(new DDLWork(inputs, outputs, renamePtnDesc), conf);
-        if (precursor != null){
-          precursor.addDependentTask(renamePtnTask);
-        }
-        List<Task<? extends Serializable>> tasks = new ArrayList<Task<? extends Serializable>>();
-        tasks.add(renamePtnTask);
-        LOG.debug("Added rename ptn task : {}:{}->{}", renamePtnTask.getId(), oldPartSpec, newPartSpec);
-        dbsUpdated.put(actualDbName, dmd.getEventTo());
-        tablesUpdated.put(tableName, dmd.getEventTo());
-        return tasks;
-      }
-      case EVENT_TRUNCATE_PARTITION: {
-        AlterPartitionMessage truncatePtnMessage = md.getAlterPartitionMessage(dmd.getPayload());
-        String actualDbName = ((dbName == null) || dbName.isEmpty() ? truncatePtnMessage.getDB() : dbName);
-        String actualTblName = ((tblName == null) || tblName.isEmpty() ? truncatePtnMessage.getTable() : tblName);
-
-        Map<String, String> partSpec = new LinkedHashMap<String,String>();
-        try {
-          org.apache.hadoop.hive.metastore.api.Table tblObj = truncatePtnMessage.getTableObj();
-          org.apache.hadoop.hive.metastore.api.Partition pobjAfter = truncatePtnMessage.getPtnObjAfter();
-          Iterator<String> afterValIter = pobjAfter.getValuesIterator();
-          for (FieldSchema fs : tblObj.getPartitionKeys()){
-            partSpec.put(fs.getName(), afterValIter.next());
-          }
-        } catch (Exception e) {
-          if (!(e instanceof SemanticException)){
-            throw new SemanticException("Error reading message members", e);
-          } else {
-            throw (SemanticException)e;
-          }
-        }
-
-        TruncateTableDesc truncateTableDesc = new TruncateTableDesc(
-                actualDbName + "." + actualTblName, partSpec);
-        Task<DDLWork> truncatePtnTask = TaskFactory.get(new DDLWork(inputs, outputs, truncateTableDesc), conf);
-        if (precursor != null) {
-          precursor.addDependentTask(truncatePtnTask);
-        }
-
-        List<Task<? extends Serializable>> tasks = new ArrayList<Task<? extends Serializable>>();
-        tasks.add(truncatePtnTask);
-        LOG.debug("Added truncate ptn task : {}:{}", truncatePtnTask.getId(), truncateTableDesc.getTableName());
-        dbsUpdated.put(actualDbName,dmd.getEventTo());
-        return tasks;
-      }
-      case EVENT_INSERT: {
-        md = MessageFactory.getInstance().getDeserializer();
-        InsertMessage insertMessage = md.getInsertMessage(dmd.getPayload());
-        String actualDbName = ((dbName == null) || dbName.isEmpty() ? insertMessage.getDB() : dbName);
-        String actualTblName = ((tblName == null) || tblName.isEmpty() ? insertMessage.getTable() : tblName);
-
-        // Piggybacking in Import logic for now
-        return analyzeTableLoad(actualDbName, actualTblName, locn, precursor, dbsUpdated, tablesUpdated);
-      }
-      case EVENT_UNKNOWN: {
-        break;
-      }
-      default: {
-        break;
-      }
-    }
-    return null;
-  }
-
-  private Map<Integer, List<ExprNodeGenericFuncDesc>> genPartSpecs(Table table,
-      List<Map<String, String>> partitions) throws SemanticException {
-    Map<Integer, List<ExprNodeGenericFuncDesc>> partSpecs =
-        new HashMap<Integer, List<ExprNodeGenericFuncDesc>>();
-    int partPrefixLength = 0;
-    if ((partitions != null) && (partitions.size() > 0)) {
-      partPrefixLength = partitions.get(0).size();
-      // pick the length of the first ptn, we expect all ptns listed to have the same number of
-      // key-vals.
-    }
-    List<ExprNodeGenericFuncDesc> ptnDescs = new ArrayList<ExprNodeGenericFuncDesc>();
-    for (Map<String, String> ptn : partitions) {
-      // convert each key-value-map to appropriate expression.
-      ExprNodeGenericFuncDesc expr = null;
-      for (Map.Entry<String, String> kvp : ptn.entrySet()) {
-        String key = kvp.getKey();
-        Object val = kvp.getValue();
-        String type = table.getPartColByName(key).getType();
-        ;
-        PrimitiveTypeInfo pti = TypeInfoFactory.getPrimitiveTypeInfo(type);
-        ExprNodeColumnDesc column = new ExprNodeColumnDesc(pti, key, null, true);
-        ExprNodeGenericFuncDesc op = DDLSemanticAnalyzer.makeBinaryPredicate(
-            "=", column, new ExprNodeConstantDesc(pti, val));
-        expr = (expr == null) ? op : DDLSemanticAnalyzer.makeBinaryPredicate("and", expr, op);
-      }
-      if (expr != null) {
-        ptnDescs.add(expr);
+      String dbName, String tblName, String locn, Task<? extends Serializable> precursor,
+      Map<String, Long> dbsUpdated, Map<String, Long> tablesUpdated, DumpMetaData dmd)
+      throws SemanticException {
+    MessageHandler.Context context =
+        new MessageHandler.Context(dbName, tblName, locn, precursor, dmd, conf, db, ctx, LOG);
+    MessageHandler messageHandler = MessageHandlerFactory.handlerFor(dmd.getDumpType());
+    List<Task<? extends Serializable>> tasks = messageHandler.handle(context);
+
+    if (precursor != null) {
+      for (Task<? extends Serializable> t : tasks) {
+        precursor.addDependentTask(t);
+        LOG.debug("Added {}:{} as a precursor of {}:{}",
+            precursor.getClass(), precursor.getId(), t.getClass(), t.getId());
       }
     }
-    if (ptnDescs.size() > 0) {
-      partSpecs.put(partPrefixLength, ptnDescs);
-    }
-    return partSpecs;
+    dbsUpdated.putAll(messageHandler.databasesUpdated());
+    tablesUpdated.putAll(messageHandler.tablesUpdated());
+    inputs.addAll(messageHandler.readEntities());
+    outputs.addAll(messageHandler.writeEntities());
+    return tasks;
   }
 
   private boolean existEmptyDb(String dbName) throws InvalidOperationException, HiveException {
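
The rewritten analyzeEventLoad above keeps only the orchestration: ask the factory for a handler, let it build the tasks, then wire the precursor dependency and merge the bookkeeping maps in one place instead of inside every case. A stripped-down, hypothetical illustration of that flow with stand-in types (not the real Task or MessageHandler classes):

import java.util.ArrayList;
import java.util.List;
import java.util.Map;

// Hypothetical, stripped-down illustration of the control flow in the rewritten
// analyzeEventLoad: the handler produces tasks plus bookkeeping maps, and the caller
// (not each handler) chains the precursor dependency and merges the maps.
public class EventLoadFlowSketch {
  static class Task {
    final String name;
    final List<Task> dependents = new ArrayList<>();
    Task(String name) { this.name = name; }
    void addDependentTask(Task t) { dependents.add(t); }
  }

  interface MessageHandlerLike {
    List<Task> handle(String context);
    Map<String, Long> databasesUpdated();
    Map<String, Long> tablesUpdated();
  }

  static List<Task> analyzeEventLoad(MessageHandlerLike handler, Task precursor,
      Map<String, Long> dbsUpdated, Map<String, Long> tablesUpdated) {
    List<Task> tasks = handler.handle("event payload");
    if (precursor != null) {
      for (Task t : tasks) {
        precursor.addDependentTask(t);   // the single place where tasks get chained
      }
    }
    dbsUpdated.putAll(handler.databasesUpdated());
    tablesUpdated.putAll(handler.tablesUpdated());
    return tasks;
  }
}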

http://git-wip-us.apache.org/repos/asf/hive/blob/9d4f13af/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/DumpType.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/DumpType.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/DumpType.java
index b1df5a3..c2cffaa 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/DumpType.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/DumpType.java
@@ -31,6 +31,7 @@ public enum DumpType {
   EVENT_RENAME_PARTITION("EVENT_RENAME_PARTITION"),
   EVENT_TRUNCATE_PARTITION("EVENT_TRUNCATE_PARTITION"),
   EVENT_INSERT("EVENT_INSERT"),
+  EVENT_CREATE_FUNCTION("EVENT_CREATE_FUNCTION"),
   EVENT_UNKNOWN("EVENT_UNKNOWN");
 
   String type = null;

http://git-wip-us.apache.org/repos/asf/hive/blob/9d4f13af/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/events/AbstractHandler.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/events/AbstractHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/events/AbstractHandler.java
new file mode 100644
index 0000000..ba699e3
--- /dev/null
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/events/AbstractHandler.java
@@ -0,0 +1,46 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.ql.parse.repl.dump.events;
+
+import org.apache.hadoop.hive.metastore.api.NotificationEvent;
+import org.apache.hadoop.hive.metastore.messaging.MessageDeserializer;
+import org.apache.hadoop.hive.metastore.messaging.MessageFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+abstract class AbstractHandler implements EventHandler {
+  static final Logger LOG = LoggerFactory.getLogger(AbstractHandler.class);
+
+  final NotificationEvent event;
+  final MessageDeserializer deserializer;
+
+  AbstractHandler(NotificationEvent event) {
+    this.event = event;
+    deserializer = MessageFactory.getInstance().getDeserializer();
+  }
+
+  @Override
+  public long fromEventId() {
+    return event.getEventId();
+  }
+
+  @Override
+  public long toEventId() {
+    return event.getEventId();
+  }
+}

http://git-wip-us.apache.org/repos/asf/hive/blob/9d4f13af/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/events/AddPartitionHandler.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/events/AddPartitionHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/events/AddPartitionHandler.java
new file mode 100644
index 0000000..f4239e5
--- /dev/null
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/events/AddPartitionHandler.java
@@ -0,0 +1,114 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.ql.parse.repl.dump.events;
+
+import com.google.common.base.Function;
+import com.google.common.collect.Iterables;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hive.metastore.api.NotificationEvent;
+import org.apache.hadoop.hive.metastore.messaging.AddPartitionMessage;
+import org.apache.hadoop.hive.metastore.messaging.PartitionFiles;
+import org.apache.hadoop.hive.ql.metadata.HiveException;
+import org.apache.hadoop.hive.ql.metadata.Partition;
+import org.apache.hadoop.hive.ql.metadata.Table;
+import org.apache.hadoop.hive.ql.parse.EximUtil;
+
+import javax.annotation.Nullable;
+import java.io.BufferedWriter;
+import java.io.IOException;
+import java.io.OutputStreamWriter;
+import java.util.Iterator;
+
+import org.apache.hadoop.hive.ql.parse.repl.DumpType;
+
+class AddPartitionHandler extends AbstractHandler {
+  protected AddPartitionHandler(NotificationEvent notificationEvent) {
+    super(notificationEvent);
+  }
+
+  @Override
+  public void handle(Context withinContext) throws Exception {
+    AddPartitionMessage apm = deserializer.getAddPartitionMessage(event.getMessage());
+    LOG.info("Processing#{} ADD_PARTITION message : {}", fromEventId(), event.getMessage());
+    Iterable<org.apache.hadoop.hive.metastore.api.Partition> ptns = apm.getPartitionObjs();
+    if ((ptns == null) || (!ptns.iterator().hasNext())) {
+      LOG.debug("Event#{} was an ADD_PTN_EVENT with no partitions");
+      return;
+    }
+    org.apache.hadoop.hive.metastore.api.Table tobj = apm.getTableObj();
+    if (tobj == null) {
+      LOG.debug("Event#{} was a ADD_PTN_EVENT with no table listed");
+      return;
+    }
+
+    final Table qlMdTable = new Table(tobj);
+    Iterable<Partition> qlPtns = Iterables.transform(
+        ptns,
+        new Function<org.apache.hadoop.hive.metastore.api.Partition, Partition>() {
+          @Nullable
+          @Override
+          public Partition apply(@Nullable org.apache.hadoop.hive.metastore.api.Partition input) {
+            if (input == null) {
+              return null;
+            }
+            try {
+              return new Partition(qlMdTable, input);
+            } catch (HiveException e) {
+              throw new IllegalArgumentException(e);
+            }
+          }
+        }
+    );
+
+    Path metaDataPath = new Path(withinContext.eventRoot, EximUtil.METADATA_NAME);
+    EximUtil.createExportDump(
+        metaDataPath.getFileSystem(withinContext.hiveConf),
+        metaDataPath,
+        qlMdTable,
+        qlPtns,
+        withinContext.replicationSpec);
+
+    Iterator<PartitionFiles> partitionFilesIter = apm.getPartitionFilesIter().iterator();
+    for (Partition qlPtn : qlPtns) {
+      Iterable<String> files = partitionFilesIter.next().getFiles();
+      if (files != null) {
+        // encoded filename/checksum of files, write into _files
+        try (BufferedWriter fileListWriter = writer(withinContext, qlPtn)) {
+          for (String file : files) {
+            fileListWriter.write(file + "\n");
+          }
+        }
+      }
+    }
+    withinContext.createDmd(this).write();
+  }
+
+  private BufferedWriter writer(Context withinContext, Partition qlPtn)
+      throws IOException {
+    Path ptnDataPath = new Path(withinContext.eventRoot, qlPtn.getName());
+    FileSystem fs = ptnDataPath.getFileSystem(withinContext.hiveConf);
+    Path filesPath = new Path(ptnDataPath, EximUtil.FILES_NAME);
+    return new BufferedWriter(new OutputStreamWriter(fs.create(filesPath)));
+  }
+
+  @Override
+  public DumpType dumpType() {
+    return DumpType.EVENT_ADD_PARTITION;
+  }
+}
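
AddPartitionHandler above exports the partition metadata and then writes a "_files" manifest per partition: one encoded file reference per line, placed next to the exported metadata, so the load side knows which data files to pull. A hypothetical local-filesystem stand-in for that manifest write (the per-line encoding is opaque here and shown only as a placeholder):

import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.Arrays;
import java.util.List;

// Hypothetical sketch of the "_files" manifest: one encoded file reference per line,
// written alongside the exported metadata for a partition.
public class FilesManifestSketch {
  static void writeManifest(Path partitionDir, List<String> encodedFileRefs) throws IOException {
    Files.createDirectories(partitionDir);
    Files.write(partitionDir.resolve("_files"), encodedFileRefs, StandardCharsets.UTF_8);
  }

  public static void main(String[] args) throws IOException {
    writeManifest(Paths.get("/tmp/repl_dump/evt_42/p=2017-05-01"),
        Arrays.asList("hdfs://nn:8020/warehouse/t/p=2017-05-01/000000_0#<placeholder-checksum>"));
  }
}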

http://git-wip-us.apache.org/repos/asf/hive/blob/9d4f13af/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/events/AlterPartitionHandler.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/events/AlterPartitionHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/events/AlterPartitionHandler.java
new file mode 100644
index 0000000..8a7e742
--- /dev/null
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/events/AlterPartitionHandler.java
@@ -0,0 +1,112 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.ql.parse.repl.dump.events;
+
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hive.metastore.api.NotificationEvent;
+import org.apache.hadoop.hive.metastore.messaging.AlterPartitionMessage;
+import org.apache.hadoop.hive.ql.metadata.Partition;
+import org.apache.hadoop.hive.ql.metadata.Table;
+import org.apache.hadoop.hive.ql.parse.EximUtil;
+
+import java.util.ArrayList;
+import java.util.Iterator;
+import java.util.List;
+
+import org.apache.hadoop.hive.ql.parse.repl.DumpType;
+
+import org.apache.hadoop.hive.ql.parse.repl.load.DumpMetaData;
+
+class AlterPartitionHandler extends AbstractHandler {
+  private final org.apache.hadoop.hive.metastore.api.Partition after;
+  private final org.apache.hadoop.hive.metastore.api.Table tableObject;
+  private final boolean isTruncateOp;
+  private final Scenario scenario;
+
+  AlterPartitionHandler(NotificationEvent event) throws Exception {
+    super(event);
+    AlterPartitionMessage apm = deserializer.getAlterPartitionMessage(event.getMessage());
+    tableObject = apm.getTableObj();
+    org.apache.hadoop.hive.metastore.api.Partition before = apm.getPtnObjBefore();
+    after = apm.getPtnObjAfter();
+    isTruncateOp = apm.getIsTruncateOp();
+    scenario = scenarioType(before, after);
+  }
+
+  private enum Scenario {
+    ALTER {
+      @Override
+      DumpType dumpType() {
+        return DumpType.EVENT_ALTER_PARTITION;
+      }
+    },
+    RENAME {
+      @Override
+      DumpType dumpType() {
+        return DumpType.EVENT_RENAME_PARTITION;
+      }
+    },
+    TRUNCATE {
+      @Override
+      DumpType dumpType() {
+        return DumpType.EVENT_TRUNCATE_PARTITION;
+      }
+    };
+
+    abstract DumpType dumpType();
+  }
+
+  private Scenario scenarioType(org.apache.hadoop.hive.metastore.api.Partition before,
+      org.apache.hadoop.hive.metastore.api.Partition after) {
+    Iterator<String> beforeValIter = before.getValuesIterator();
+    Iterator<String> afterValIter = after.getValuesIterator();
+    while(beforeValIter.hasNext()) {
+      if (!beforeValIter.next().equals(afterValIter.next())) {
+        return Scenario.RENAME;
+      }
+    }
+    return isTruncateOp ? Scenario.TRUNCATE : Scenario.ALTER;
+  }
+
+  @Override
+  public void handle(Context withinContext) throws Exception {
+    LOG.info("Processing#{} ALTER_PARTITION message : {}", fromEventId(), event.getMessage());
+
+    if (Scenario.ALTER == scenario) {
+      withinContext.replicationSpec.setIsMetadataOnly(true);
+      Table qlMdTable = new Table(tableObject);
+      List<Partition> partitions = new ArrayList<>();
+      partitions.add(new Partition(qlMdTable, after));
+      Path metaDataPath = new Path(withinContext.eventRoot, EximUtil.METADATA_NAME);
+      EximUtil.createExportDump(
+          metaDataPath.getFileSystem(withinContext.hiveConf),
+          metaDataPath,
+          qlMdTable,
+          partitions,
+          withinContext.replicationSpec);
+    }
+    DumpMetaData dmd = withinContext.createDmd(this);
+    dmd.setPayload(event.getMessage());
+    dmd.write();
+  }
+
+  @Override
+  public DumpType dumpType() {
+    return scenario.dumpType();
+  }
+}
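
The scenario classification above decides, from a single AlterPartitionMessage, whether the event should be replayed as a rename, a truncate, or a plain metadata alter. A worked example of the same decision logic on plain value lists (a hypothetical stand-in for the metastore Partition objects):

import java.util.Arrays;
import java.util.Iterator;
import java.util.List;

// Same decision logic as scenarioType() above, applied to plain partition-value lists.
public class AlterPartitionScenarioSketch {
  enum Scenario { ALTER, RENAME, TRUNCATE }

  static Scenario scenarioType(List<String> before, List<String> after, boolean isTruncateOp) {
    Iterator<String> b = before.iterator();
    Iterator<String> a = after.iterator();
    while (b.hasNext()) {
      if (!b.next().equals(a.next())) {
        return Scenario.RENAME;        // any changed partition value means a rename
      }
    }
    return isTruncateOp ? Scenario.TRUNCATE : Scenario.ALTER;
  }

  public static void main(String[] args) {
    System.out.println(scenarioType(Arrays.asList("2017", "05"), Arrays.asList("2017", "06"), false)); // RENAME
    System.out.println(scenarioType(Arrays.asList("2017", "05"), Arrays.asList("2017", "05"), true));  // TRUNCATE
    System.out.println(scenarioType(Arrays.asList("2017", "05"), Arrays.asList("2017", "05"), false)); // ALTER
  }
}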

http://git-wip-us.apache.org/repos/asf/hive/blob/9d4f13af/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/events/AlterTableHandler.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/events/AlterTableHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/events/AlterTableHandler.java
new file mode 100644
index 0000000..f457f23
--- /dev/null
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/events/AlterTableHandler.java
@@ -0,0 +1,102 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.ql.parse.repl.dump.events;
+
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hive.metastore.api.NotificationEvent;
+import org.apache.hadoop.hive.metastore.messaging.AlterTableMessage;
+import org.apache.hadoop.hive.ql.metadata.Table;
+import org.apache.hadoop.hive.ql.parse.EximUtil;
+
+import org.apache.hadoop.hive.ql.parse.repl.DumpType;
+
+import org.apache.hadoop.hive.ql.parse.repl.load.DumpMetaData;
+
+class AlterTableHandler extends AbstractHandler {
+  private final org.apache.hadoop.hive.metastore.api.Table after;
+  private final boolean isTruncateOp;
+  private final Scenario scenario;
+
+  private enum Scenario {
+    ALTER {
+      @Override
+      DumpType dumpType() {
+        return DumpType.EVENT_ALTER_TABLE;
+      }
+    },
+    RENAME {
+      @Override
+      DumpType dumpType() {
+        return DumpType.EVENT_RENAME_TABLE;
+      }
+    },
+    TRUNCATE {
+      @Override
+      DumpType dumpType() {
+        return DumpType.EVENT_TRUNCATE_TABLE;
+      }
+    };
+
+    abstract DumpType dumpType();
+  }
+
+  AlterTableHandler(NotificationEvent event) throws Exception {
+    super(event);
+    AlterTableMessage atm = deserializer.getAlterTableMessage(event.getMessage());
+    org.apache.hadoop.hive.metastore.api.Table before = atm.getTableObjBefore();
+    after = atm.getTableObjAfter();
+    isTruncateOp = atm.getIsTruncateOp();
+    scenario = scenarioType(before, after);
+  }
+
+  private Scenario scenarioType(org.apache.hadoop.hive.metastore.api.Table before,
+      org.apache.hadoop.hive.metastore.api.Table after) {
+    if (before.getDbName().equals(after.getDbName())
+        && before.getTableName().equals(after.getTableName())) {
+      return isTruncateOp ? Scenario.TRUNCATE : Scenario.ALTER;
+    } else {
+      return Scenario.RENAME;
+    }
+  }
+
+  @Override
+  public void handle(Context withinContext) throws Exception {
+    {
+      LOG.info("Processing#{} ALTER_TABLE message : {}", fromEventId(), event.getMessage());
+      if (Scenario.ALTER == scenario) {
+        withinContext.replicationSpec.setIsMetadataOnly(true);
+        Table qlMdTableAfter = new Table(after);
+        Path metaDataPath = new Path(withinContext.eventRoot, EximUtil.METADATA_NAME);
+        EximUtil.createExportDump(
+            metaDataPath.getFileSystem(withinContext.hiveConf),
+            metaDataPath,
+            qlMdTableAfter,
+            null,
+            withinContext.replicationSpec);
+      }
+      DumpMetaData dmd = withinContext.createDmd(this);
+      dmd.setPayload(event.getMessage());
+      dmd.write();
+    }
+  }
+
+  @Override
+  public DumpType dumpType() {
+    return scenario.dumpType();
+  }
+}

http://git-wip-us.apache.org/repos/asf/hive/blob/9d4f13af/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/events/CreateFunctionHandler.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/events/CreateFunctionHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/events/CreateFunctionHandler.java
new file mode 100644
index 0000000..bebf035
--- /dev/null
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/events/CreateFunctionHandler.java
@@ -0,0 +1,36 @@
+package org.apache.hadoop.hive.ql.parse.repl.dump.events;
+
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hive.metastore.api.NotificationEvent;
+import org.apache.hadoop.hive.metastore.messaging.CreateFunctionMessage;
+import org.apache.hadoop.hive.ql.parse.EximUtil;
+import org.apache.hadoop.hive.ql.parse.repl.dump.io.FunctionSerializer;
+import org.apache.hadoop.hive.ql.parse.repl.dump.io.JsonWriter;
+
+import org.apache.hadoop.hive.ql.parse.repl.DumpType;
+
+class CreateFunctionHandler extends AbstractHandler {
+  CreateFunctionHandler(NotificationEvent event) {
+    super(event);
+  }
+
+  @Override
+  public void handle(Context withinContext) throws Exception {
+    CreateFunctionMessage createFunctionMessage =
+        deserializer.getCreateFunctionMessage(event.getMessage());
+    LOG.info("Processing#{} CREATE_MESSAGE message : {}", fromEventId(), event.getMessage());
+    Path metadataPath = new Path(withinContext.eventRoot, EximUtil.METADATA_NAME);
+    FileSystem fileSystem = metadataPath.getFileSystem(withinContext.hiveConf);
+
+    try (JsonWriter jsonWriter = new JsonWriter(fileSystem, metadataPath)) {
+      new FunctionSerializer(createFunctionMessage.getFunctionObj())
+          .writeTo(jsonWriter, withinContext.replicationSpec);
+    }
+  }
+
+  @Override
+  public DumpType dumpType() {
+    return DumpType.EVENT_CREATE_FUNCTION;
+  }
+}

http://git-wip-us.apache.org/repos/asf/hive/blob/9d4f13af/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/events/CreateTableHandler.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/events/CreateTableHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/events/CreateTableHandler.java
new file mode 100644
index 0000000..ca3607f
--- /dev/null
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/events/CreateTableHandler.java
@@ -0,0 +1,86 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.ql.parse.repl.dump.events;
+
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hive.metastore.api.NotificationEvent;
+import org.apache.hadoop.hive.metastore.messaging.CreateTableMessage;
+import org.apache.hadoop.hive.ql.metadata.Table;
+import org.apache.hadoop.hive.ql.parse.EximUtil;
+
+import java.io.BufferedWriter;
+import java.io.IOException;
+import java.io.OutputStreamWriter;
+
+import org.apache.hadoop.hive.ql.parse.repl.DumpType;
+
+class CreateTableHandler extends AbstractHandler {
+
+  CreateTableHandler(NotificationEvent event) {
+    super(event);
+  }
+
+  @Override
+  public void handle(Context withinContext) throws Exception {
+    CreateTableMessage ctm = deserializer.getCreateTableMessage(event.getMessage());
+    LOG.info("Processing#{} CREATE_TABLE message : {}", fromEventId(), event.getMessage());
+    org.apache.hadoop.hive.metastore.api.Table tobj = ctm.getTableObj();
+
+    if (tobj == null) {
+      LOG.debug("Event#{} was a CREATE_TABLE_EVENT with no table listed");
+      return;
+    }
+
+    Table qlMdTable = new Table(tobj);
+    if (qlMdTable.isView()) {
+      withinContext.replicationSpec.setIsMetadataOnly(true);
+    }
+
+    Path metaDataPath = new Path(withinContext.eventRoot, EximUtil.METADATA_NAME);
+    EximUtil.createExportDump(
+        metaDataPath.getFileSystem(withinContext.hiveConf),
+        metaDataPath,
+        qlMdTable,
+        null,
+        withinContext.replicationSpec);
+
+    Path dataPath = new Path(withinContext.eventRoot, "data");
+    Iterable<String> files = ctm.getFiles();
+    if (files != null) {
+      // encoded filename/checksum of files, write into _files
+      try (BufferedWriter fileListWriter = writer(withinContext, dataPath)) {
+        for (String file : files) {
+          fileListWriter.write(file + "\n");
+        }
+      }
+    }
+    withinContext.createDmd(this).write();
+  }
+
+  private BufferedWriter writer(Context withinContext, Path dataPath) throws IOException {
+    FileSystem fs = dataPath.getFileSystem(withinContext.hiveConf);
+    Path filesPath = new Path(dataPath, EximUtil.FILES_NAME);
+    return new BufferedWriter(new OutputStreamWriter(fs.create(filesPath)));
+  }
+
+  @Override
+  public DumpType dumpType() {
+    return DumpType.EVENT_CREATE_TABLE;
+  }
+}
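
The copy list that writer() produces is plain text, one encoded filename/checksum entry per line, under the event's data directory. A minimal sketch of reading it back (the class and method names are made up; only the Hadoop FileSystem calls and EximUtil.FILES_NAME come from the code above):

    // Illustrative only -- not part of this commit.
    import java.io.BufferedReader;
    import java.io.IOException;
    import java.io.InputStreamReader;
    import java.util.ArrayList;
    import java.util.List;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hive.ql.parse.EximUtil;

    class FileListSketch {
      /** One entry per line: the encoded filename/checksum strings written above. */
      static List<String> readCopyList(Path dataPath, Configuration conf) throws IOException {
        FileSystem fs = dataPath.getFileSystem(conf);
        Path filesPath = new Path(dataPath, EximUtil.FILES_NAME);
        List<String> entries = new ArrayList<>();
        try (BufferedReader reader =
            new BufferedReader(new InputStreamReader(fs.open(filesPath)))) {
          String line;
          while ((line = reader.readLine()) != null) {
            entries.add(line);
          }
        }
        return entries;
      }
    }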

http://git-wip-us.apache.org/repos/asf/hive/blob/9d4f13af/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/events/DefaultHandler.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/events/DefaultHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/events/DefaultHandler.java
new file mode 100644
index 0000000..0d4665a
--- /dev/null
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/events/DefaultHandler.java
@@ -0,0 +1,44 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.ql.parse.repl.dump.events;
+
+import org.apache.hadoop.hive.metastore.api.NotificationEvent;
+
+import org.apache.hadoop.hive.ql.parse.repl.DumpType;
+
+import org.apache.hadoop.hive.ql.parse.repl.load.DumpMetaData;
+
+class DefaultHandler extends AbstractHandler {
+
+  DefaultHandler(NotificationEvent event) {
+    super(event);
+  }
+
+  @Override
+  public void handle(Context withinContext) throws Exception {
+    LOG.info("Dummy processing#{} message : {}", fromEventId(), event.getMessage());
+    DumpMetaData dmd = withinContext.createDmd(this);
+    dmd.setPayload(event.getMessage());
+    dmd.write();
+  }
+
+  @Override
+  public DumpType dumpType() {
+    return DumpType.EVENT_UNKNOWN;
+  }
+}

http://git-wip-us.apache.org/repos/asf/hive/blob/9d4f13af/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/events/DropPartitionHandler.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/events/DropPartitionHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/events/DropPartitionHandler.java
new file mode 100644
index 0000000..a4eacc4
--- /dev/null
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/events/DropPartitionHandler.java
@@ -0,0 +1,44 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.ql.parse.repl.dump.events;
+
+import org.apache.hadoop.hive.metastore.api.NotificationEvent;
+
+import org.apache.hadoop.hive.ql.parse.repl.DumpType;
+
+import org.apache.hadoop.hive.ql.parse.repl.load.DumpMetaData;
+
+class DropPartitionHandler extends AbstractHandler {
+
+  DropPartitionHandler(NotificationEvent event) {
+    super(event);
+  }
+
+  @Override
+  public void handle(Context withinContext) throws Exception {
+    LOG.info("Processing#{} DROP_PARTITION message : {}", fromEventId(), event.getMessage());
+    DumpMetaData dmd = withinContext.createDmd(this);
+    dmd.setPayload(event.getMessage());
+    dmd.write();
+  }
+
+  @Override
+  public DumpType dumpType() {
+    return DumpType.EVENT_DROP_PARTITION;
+  }
+}

http://git-wip-us.apache.org/repos/asf/hive/blob/9d4f13af/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/events/DropTableHandler.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/events/DropTableHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/events/DropTableHandler.java
new file mode 100644
index 0000000..40cd5cb
--- /dev/null
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/events/DropTableHandler.java
@@ -0,0 +1,44 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.ql.parse.repl.dump.events;
+
+import org.apache.hadoop.hive.metastore.api.NotificationEvent;
+
+import org.apache.hadoop.hive.ql.parse.repl.DumpType;
+
+import org.apache.hadoop.hive.ql.parse.repl.load.DumpMetaData;
+
+class DropTableHandler extends AbstractHandler {
+
+  DropTableHandler(NotificationEvent event) {
+    super(event);
+  }
+
+  @Override
+  public void handle(Context withinContext) throws Exception {
+    LOG.info("Processing#{} DROP_TABLE message : {}", fromEventId(), event.getMessage());
+    DumpMetaData dmd = withinContext.createDmd(this);
+    dmd.setPayload(event.getMessage());
+    dmd.write();
+  }
+
+  @Override
+  public DumpType dumpType() {
+    return DumpType.EVENT_DROP_TABLE;
+  }
+}

http://git-wip-us.apache.org/repos/asf/hive/blob/9d4f13af/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/events/EventHandler.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/events/EventHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/events/EventHandler.java
new file mode 100644
index 0000000..c0fa7b2
--- /dev/null
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/events/EventHandler.java
@@ -0,0 +1,62 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.ql.parse.repl.dump.events;
+
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.ql.metadata.Hive;
+import org.apache.hadoop.hive.ql.parse.ReplicationSpec;
+
+import org.apache.hadoop.hive.ql.parse.repl.load.DumpMetaData;
+import org.apache.hadoop.hive.ql.parse.repl.DumpType;
+
+public interface EventHandler {
+  void handle(Context withinContext) throws Exception;
+
+  long fromEventId();
+
+  long toEventId();
+
+  DumpType dumpType();
+
+  class Context {
+    final Path eventRoot, cmRoot;
+    final Hive db;
+    final HiveConf hiveConf;
+    final ReplicationSpec replicationSpec;
+
+    public Context(Path eventRoot, Path cmRoot, Hive db, HiveConf hiveConf,
+        ReplicationSpec replicationSpec) {
+      this.eventRoot = eventRoot;
+      this.cmRoot = cmRoot;
+      this.db = db;
+      this.hiveConf = hiveConf;
+      this.replicationSpec = replicationSpec;
+    }
+
+    DumpMetaData createDmd(EventHandler eventHandler) {
+      return new DumpMetaData(
+          eventRoot,
+          eventHandler.dumpType(),
+          eventHandler.fromEventId(),
+          eventHandler.toEventId(),
+          cmRoot, hiveConf
+      );
+    }
+  }
+}
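
Context carries everything a handler needs for one event; together with the factory in the next file, driving a single notification event looks roughly like the sketch below. The wrapper class, method name, and parameter names are assumptions; only the Context constructor, handlerFor() and handle() come from this patch.

    // Illustrative only -- not part of this commit.
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hive.conf.HiveConf;
    import org.apache.hadoop.hive.metastore.api.NotificationEvent;
    import org.apache.hadoop.hive.ql.metadata.Hive;
    import org.apache.hadoop.hive.ql.parse.ReplicationSpec;
    import org.apache.hadoop.hive.ql.parse.repl.dump.events.EventHandler;
    import org.apache.hadoop.hive.ql.parse.repl.dump.events.EventHandlerFactory;

    class EventDumpSketch {
      static void dumpOneEvent(NotificationEvent event, Path eventRoot, Path cmRoot,
          Hive db, HiveConf hiveConf, ReplicationSpec replicationSpec) throws Exception {
        EventHandler.Context context =
            new EventHandler.Context(eventRoot, cmRoot, db, hiveConf, replicationSpec);
        EventHandler handler = EventHandlerFactory.handlerFor(event);
        // the handler writes _metadata/_files as needed, then its own dump metadata
        handler.handle(context);
      }
    }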

http://git-wip-us.apache.org/repos/asf/hive/blob/9d4f13af/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/events/EventHandlerFactory.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/events/EventHandlerFactory.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/events/EventHandlerFactory.java
new file mode 100644
index 0000000..08dbd13
--- /dev/null
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/events/EventHandlerFactory.java
@@ -0,0 +1,76 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.ql.parse.repl.dump.events;
+
+import org.apache.hadoop.hive.metastore.api.NotificationEvent;
+import org.apache.hadoop.hive.metastore.messaging.MessageFactory;
+
+import java.lang.reflect.Constructor;
+import java.lang.reflect.InvocationTargetException;
+import java.lang.reflect.Modifier;
+import java.util.HashMap;
+import java.util.Map;
+
+public class EventHandlerFactory {
+  private EventHandlerFactory() {
+  }
+
+  private static Map<String, Class<? extends EventHandler>> registeredHandlers = new HashMap<>();
+
+  static {
+    register(MessageFactory.ADD_PARTITION_EVENT, AddPartitionHandler.class);
+    register(MessageFactory.ALTER_PARTITION_EVENT, AlterPartitionHandler.class);
+    register(MessageFactory.ALTER_TABLE_EVENT, AlterTableHandler.class);
+    register(MessageFactory.CREATE_FUNCTION_EVENT, CreateFunctionHandler.class);
+    register(MessageFactory.CREATE_TABLE_EVENT, CreateTableHandler.class);
+    register(MessageFactory.DROP_PARTITION_EVENT, DropPartitionHandler.class);
+    register(MessageFactory.DROP_TABLE_EVENT, DropTableHandler.class);
+    register(MessageFactory.INSERT_EVENT, InsertHandler.class);
+  }
+
+  static void register(String event, Class<? extends EventHandler> handlerClazz) {
+    try {
+      Constructor<? extends EventHandler> constructor =
+          handlerClazz.getDeclaredConstructor(NotificationEvent.class);
+      assert constructor != null;
+      assert !Modifier.isPrivate(constructor.getModifiers());
+      registeredHandlers.put(event, handlerClazz);
+    } catch (NoSuchMethodException e) {
+      throw new IllegalArgumentException("handler class: " + handlerClazz.getCanonicalName()
+          + " does not have the a constructor with only parameter of type:"
+          + NotificationEvent.class.getCanonicalName(), e);
+    }
+  }
+
+  public static EventHandler handlerFor(NotificationEvent event) {
+    if (registeredHandlers.containsKey(event.getEventType())) {
+      Class<? extends EventHandler> handlerClazz = registeredHandlers.get(event.getEventType());
+      try {
+        Constructor<? extends EventHandler> constructor =
+            handlerClazz.getDeclaredConstructor(NotificationEvent.class);
+        return constructor.newInstance(event);
+      } catch (NoSuchMethodException | IllegalAccessException | InstantiationException | InvocationTargetException e) {
+        // this should never happen. however we want to make sure we propagate the exception
+        throw new RuntimeException(
+            "failed when creating handler for " + event.getEventType()
+                + " with the responsible class being " + handlerClazz.getCanonicalName(), e);
+      }
+    }
+    return new DefaultHandler(event);
+  }
+}
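
register() insists, via the reflection checks above, that every handler expose a non-private constructor whose single parameter is a NotificationEvent; event types without a registered handler fall back to DefaultHandler, which stores the raw message as an EVENT_UNKNOWN payload. A hypothetical handler in this package, shown only to illustrate that contract (NoOpHandler is not part of this commit):

    // Illustrative only -- not part of this commit.
    package org.apache.hadoop.hive.ql.parse.repl.dump.events;

    import org.apache.hadoop.hive.metastore.api.NotificationEvent;
    import org.apache.hadoop.hive.ql.parse.repl.DumpType;

    class NoOpHandler extends AbstractHandler {

      NoOpHandler(NotificationEvent event) {
        super(event);
      }

      @Override
      public void handle(Context withinContext) throws Exception {
        // record only the dump metadata for the event, nothing else
        withinContext.createDmd(this).write();
      }

      @Override
      public DumpType dumpType() {
        return DumpType.EVENT_UNKNOWN;
      }
    }

It would then be wired up with an extra register(...) call in the static block above; until that happens such events keep going through DefaultHandler.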

http://git-wip-us.apache.org/repos/asf/hive/blob/9d4f13af/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/events/InsertHandler.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/events/InsertHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/events/InsertHandler.java
new file mode 100644
index 0000000..0393701
--- /dev/null
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/events/InsertHandler.java
@@ -0,0 +1,110 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.ql.parse.repl.dump.events;
+
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hive.metastore.api.NotificationEvent;
+import org.apache.hadoop.hive.metastore.messaging.InsertMessage;
+import org.apache.hadoop.hive.ql.metadata.Partition;
+import org.apache.hadoop.hive.ql.parse.EximUtil;
+import org.apache.thrift.TException;
+
+import java.io.BufferedWriter;
+import java.io.IOException;
+import java.io.OutputStreamWriter;
+import java.util.Collections;
+import java.util.List;
+import java.util.Map;
+
+import org.apache.hadoop.hive.ql.parse.repl.DumpType;
+
+import org.apache.hadoop.hive.ql.parse.repl.load.DumpMetaData;
+
+class InsertHandler extends AbstractHandler {
+
+  InsertHandler(NotificationEvent event) {
+    super(event);
+  }
+
+  @Override
+  public void handle(Context withinContext) throws Exception {
+    InsertMessage insertMsg = deserializer.getInsertMessage(event.getMessage());
+    org.apache.hadoop.hive.ql.metadata.Table qlMdTable = tableObject(withinContext, insertMsg);
+    Map<String, String> partSpec = insertMsg.getPartitionKeyValues();
+    List<Partition> qlPtns = null;
+    if (qlMdTable.isPartitioned() && !partSpec.isEmpty()) {
+      qlPtns = Collections.singletonList(withinContext.db.getPartition(qlMdTable, partSpec, false));
+    }
+    Path metaDataPath = new Path(withinContext.eventRoot, EximUtil.METADATA_NAME);
+
+    // Mark the replace type based on INSERT-INTO or INSERT_OVERWRITE operation
+    withinContext.replicationSpec.setIsReplace(insertMsg.isReplace());
+    EximUtil.createExportDump(metaDataPath.getFileSystem(withinContext.hiveConf), metaDataPath,
+        qlMdTable, qlPtns,
+        withinContext.replicationSpec);
+    Iterable<String> files = insertMsg.getFiles();
+
+    if (files != null) {
+      Path dataPath;
+      if ((null == qlPtns) || qlPtns.isEmpty()) {
+        dataPath = new Path(withinContext.eventRoot, EximUtil.DATA_PATH_NAME);
+      } else {
+        /*
+         * An insert into/overwrite operation may touch several partitions, even across
+         * tables, but an insert event is generated per partition written to, so the
+         * qlPtns list always has exactly one entry here.
+         */
+        assert(1 == qlPtns.size());
+        dataPath = new Path(withinContext.eventRoot, qlPtns.get(0).getName());
+      }
+
+      // encoded filename/checksum of files, write into _files
+      try (BufferedWriter fileListWriter = writer(withinContext, dataPath)) {
+        for (String file : files) {
+          fileListWriter.write(file + "\n");
+        }
+      }
+    }
+
+    LOG.info("Processing#{} INSERT message : {}", fromEventId(), event.getMessage());
+    DumpMetaData dmd = withinContext.createDmd(this);
+    dmd.setPayload(event.getMessage());
+    dmd.write();
+  }
+
+  private org.apache.hadoop.hive.ql.metadata.Table tableObject(
+      Context withinContext, InsertMessage insertMsg) throws TException {
+    return new org.apache.hadoop.hive.ql.metadata.Table(
+        withinContext.db.getMSC().getTable(
+            insertMsg.getDB(), insertMsg.getTable()
+        )
+    );
+  }
+
+  private BufferedWriter writer(Context withinContext, Path dataPath) throws IOException {
+    Path filesPath = new Path(dataPath, EximUtil.FILES_NAME);
+    FileSystem fs = dataPath.getFileSystem(withinContext.hiveConf);
+    return new BufferedWriter(new OutputStreamWriter(fs.create(filesPath)));
+  }
+
+  @Override
+  public DumpType dumpType() {
+    return DumpType.EVENT_INSERT;
+  }
+}
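
Putting the branches above together, one insert event dump ends up with a metadata file plus a single _files list, either under the shared data directory or under the partition's name. A small sketch that recomputes those paths (the sample event root and partition name are made up; the EximUtil constants are the ones used above):

    // Illustrative only -- not part of this commit.
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hive.ql.parse.EximUtil;

    class InsertDumpLayoutSketch {
      static void show() {
        Path eventRoot = new Path("/repl/dump/42");                   // sample value
        Path metadata = new Path(eventRoot, EximUtil.METADATA_NAME);  // table (+ partition) metadata
        // unpartitioned target table: <eventRoot>/<DATA_PATH_NAME>/<FILES_NAME>
        Path unpartitioned =
            new Path(new Path(eventRoot, EximUtil.DATA_PATH_NAME), EximUtil.FILES_NAME);
        // partitioned target: one insert event per partition -> <eventRoot>/<ptnName>/<FILES_NAME>
        Path partitioned =
            new Path(new Path(eventRoot, "ds=2017-05-04"), EximUtil.FILES_NAME);
        System.out.println(metadata + "\n" + unpartitioned + "\n" + partitioned);
      }
    }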

http://git-wip-us.apache.org/repos/asf/hive/blob/9d4f13af/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/events/AbstractHandler.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/events/AbstractHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/events/AbstractHandler.java
deleted file mode 100644
index ab059c2..0000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/events/AbstractHandler.java
+++ /dev/null
@@ -1,46 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hive.ql.parse.repl.events;
-
-import org.apache.hadoop.hive.metastore.api.NotificationEvent;
-import org.apache.hadoop.hive.metastore.messaging.MessageDeserializer;
-import org.apache.hadoop.hive.metastore.messaging.MessageFactory;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-public abstract class AbstractHandler implements EventHandler {
-  static final Logger LOG = LoggerFactory.getLogger(AbstractHandler.class);
-
-  final NotificationEvent event;
-  final MessageDeserializer deserializer;
-
-  AbstractHandler(NotificationEvent event) {
-    this.event = event;
-    deserializer = MessageFactory.getInstance().getDeserializer();
-  }
-
-  @Override
-  public long fromEventId() {
-    return event.getEventId();
-  }
-
-  @Override
-  public long toEventId() {
-    return event.getEventId();
-  }
-}

http://git-wip-us.apache.org/repos/asf/hive/blob/9d4f13af/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/events/AddPartitionHandler.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/events/AddPartitionHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/events/AddPartitionHandler.java
deleted file mode 100644
index 1616ab9..0000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/events/AddPartitionHandler.java
+++ /dev/null
@@ -1,114 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hive.ql.parse.repl.events;
-
-import com.google.common.base.Function;
-import com.google.common.collect.Iterables;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hive.metastore.api.NotificationEvent;
-import org.apache.hadoop.hive.metastore.messaging.AddPartitionMessage;
-import org.apache.hadoop.hive.metastore.messaging.PartitionFiles;
-import org.apache.hadoop.hive.ql.metadata.HiveException;
-import org.apache.hadoop.hive.ql.metadata.Partition;
-import org.apache.hadoop.hive.ql.metadata.Table;
-import org.apache.hadoop.hive.ql.parse.EximUtil;
-
-import javax.annotation.Nullable;
-import java.io.BufferedWriter;
-import java.io.IOException;
-import java.io.OutputStreamWriter;
-import java.util.Iterator;
-
-import org.apache.hadoop.hive.ql.parse.repl.DumpType;
-
-public class AddPartitionHandler extends AbstractHandler {
-  protected AddPartitionHandler(NotificationEvent notificationEvent) {
-    super(notificationEvent);
-  }
-
-  @Override
-  public void handle(Context withinContext) throws Exception {
-    AddPartitionMessage apm = deserializer.getAddPartitionMessage(event.getMessage());
-    LOG.info("Processing#{} ADD_PARTITION message : {}", fromEventId(), event.getMessage());
-    Iterable<org.apache.hadoop.hive.metastore.api.Partition> ptns = apm.getPartitionObjs();
-    if ((ptns == null) || (!ptns.iterator().hasNext())) {
-      LOG.debug("Event#{} was an ADD_PTN_EVENT with no partitions");
-      return;
-    }
-    org.apache.hadoop.hive.metastore.api.Table tobj = apm.getTableObj();
-    if (tobj == null) {
-      LOG.debug("Event#{} was a ADD_PTN_EVENT with no table listed");
-      return;
-    }
-
-    final Table qlMdTable = new Table(tobj);
-    Iterable<Partition> qlPtns = Iterables.transform(
-        ptns,
-        new Function<org.apache.hadoop.hive.metastore.api.Partition, Partition>() {
-          @Nullable
-          @Override
-          public Partition apply(@Nullable org.apache.hadoop.hive.metastore.api.Partition input) {
-            if (input == null) {
-              return null;
-            }
-            try {
-              return new Partition(qlMdTable, input);
-            } catch (HiveException e) {
-              throw new IllegalArgumentException(e);
-            }
-          }
-        }
-    );
-
-    Path metaDataPath = new Path(withinContext.eventRoot, EximUtil.METADATA_NAME);
-    EximUtil.createExportDump(
-        metaDataPath.getFileSystem(withinContext.hiveConf),
-        metaDataPath,
-        qlMdTable,
-        qlPtns,
-        withinContext.replicationSpec);
-
-    Iterator<PartitionFiles> partitionFilesIter = apm.getPartitionFilesIter().iterator();
-    for (Partition qlPtn : qlPtns) {
-      Iterable<String> files = partitionFilesIter.next().getFiles();
-      if (files != null) {
-        // encoded filename/checksum of files, write into _files
-        try (BufferedWriter fileListWriter = writer(withinContext, qlPtn)) {
-          for (String file : files) {
-            fileListWriter.write(file + "\n");
-          }
-        }
-      }
-    }
-    withinContext.createDmd(this).write();
-  }
-
-  private BufferedWriter writer(Context withinContext, Partition qlPtn)
-      throws IOException {
-    Path ptnDataPath = new Path(withinContext.eventRoot, qlPtn.getName());
-    FileSystem fs = ptnDataPath.getFileSystem(withinContext.hiveConf);
-    Path filesPath = new Path(ptnDataPath, EximUtil.FILES_NAME);
-    return new BufferedWriter(new OutputStreamWriter(fs.create(filesPath)));
-  }
-
-  @Override
-  public DumpType dumpType() {
-    return DumpType.EVENT_ADD_PARTITION;
-  }
-}

http://git-wip-us.apache.org/repos/asf/hive/blob/9d4f13af/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/events/AlterPartitionHandler.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/events/AlterPartitionHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/events/AlterPartitionHandler.java
deleted file mode 100644
index b6c3496..0000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/events/AlterPartitionHandler.java
+++ /dev/null
@@ -1,112 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hive.ql.parse.repl.events;
-
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hive.metastore.api.NotificationEvent;
-import org.apache.hadoop.hive.metastore.messaging.AlterPartitionMessage;
-import org.apache.hadoop.hive.ql.metadata.Partition;
-import org.apache.hadoop.hive.ql.metadata.Table;
-import org.apache.hadoop.hive.ql.parse.EximUtil;
-
-import java.util.ArrayList;
-import java.util.Iterator;
-import java.util.List;
-
-import org.apache.hadoop.hive.ql.parse.repl.DumpType;
-
-import org.apache.hadoop.hive.ql.parse.repl.load.DumpMetaData;
-
-public class AlterPartitionHandler extends AbstractHandler {
-  private final org.apache.hadoop.hive.metastore.api.Partition after;
-  private final org.apache.hadoop.hive.metastore.api.Table tableObject;
-  private final boolean isTruncateOp;
-  private final Scenario scenario;
-
-  AlterPartitionHandler(NotificationEvent event) throws Exception {
-    super(event);
-    AlterPartitionMessage apm = deserializer.getAlterPartitionMessage(event.getMessage());
-    tableObject = apm.getTableObj();
-    org.apache.hadoop.hive.metastore.api.Partition before = apm.getPtnObjBefore();
-    after = apm.getPtnObjAfter();
-    isTruncateOp = apm.getIsTruncateOp();
-    scenario = scenarioType(before, after);
-  }
-
-  private enum Scenario {
-    ALTER {
-      @Override
-      DumpType dumpType() {
-        return DumpType.EVENT_ALTER_PARTITION;
-      }
-    },
-    RENAME {
-      @Override
-      DumpType dumpType() {
-        return DumpType.EVENT_RENAME_PARTITION;
-      }
-    },
-    TRUNCATE {
-      @Override
-      DumpType dumpType() {
-        return DumpType.EVENT_TRUNCATE_PARTITION;
-      }
-    };
-
-    abstract DumpType dumpType();
-  }
-
-  private Scenario scenarioType(org.apache.hadoop.hive.metastore.api.Partition before,
-      org.apache.hadoop.hive.metastore.api.Partition after) {
-    Iterator<String> beforeValIter = before.getValuesIterator();
-    Iterator<String> afterValIter = after.getValuesIterator();
-    while(beforeValIter.hasNext()) {
-      if (!beforeValIter.next().equals(afterValIter.next())) {
-        return Scenario.RENAME;
-      }
-    }
-    return isTruncateOp ? Scenario.TRUNCATE : Scenario.ALTER;
-  }
-
-  @Override
-  public void handle(Context withinContext) throws Exception {
-    LOG.info("Processing#{} ALTER_PARTITION message : {}", fromEventId(), event.getMessage());
-
-    if (Scenario.ALTER == scenario) {
-      withinContext.replicationSpec.setIsMetadataOnly(true);
-      Table qlMdTable = new Table(tableObject);
-      List<Partition> partitions = new ArrayList<>();
-      partitions.add(new Partition(qlMdTable, after));
-      Path metaDataPath = new Path(withinContext.eventRoot, EximUtil.METADATA_NAME);
-      EximUtil.createExportDump(
-          metaDataPath.getFileSystem(withinContext.hiveConf),
-          metaDataPath,
-          qlMdTable,
-          partitions,
-          withinContext.replicationSpec);
-    }
-    DumpMetaData dmd = withinContext.createDmd(this);
-    dmd.setPayload(event.getMessage());
-    dmd.write();
-  }
-
-  @Override
-  public DumpType dumpType() {
-    return scenario.dumpType();
-  }
-}

http://git-wip-us.apache.org/repos/asf/hive/blob/9d4f13af/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/events/AlterTableHandler.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/events/AlterTableHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/events/AlterTableHandler.java
deleted file mode 100644
index d553240..0000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/events/AlterTableHandler.java
+++ /dev/null
@@ -1,102 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hive.ql.parse.repl.events;
-
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hive.metastore.api.NotificationEvent;
-import org.apache.hadoop.hive.metastore.messaging.AlterTableMessage;
-import org.apache.hadoop.hive.ql.metadata.Table;
-import org.apache.hadoop.hive.ql.parse.EximUtil;
-
-import org.apache.hadoop.hive.ql.parse.repl.DumpType;
-
-import org.apache.hadoop.hive.ql.parse.repl.load.DumpMetaData;
-
-public class AlterTableHandler extends AbstractHandler {
-  private final org.apache.hadoop.hive.metastore.api.Table after;
-  private final boolean isTruncateOp;
-  private final Scenario scenario;
-
-  private enum Scenario {
-    ALTER {
-      @Override
-      DumpType dumpType() {
-        return DumpType.EVENT_ALTER_TABLE;
-      }
-    },
-    RENAME {
-      @Override
-      DumpType dumpType() {
-        return DumpType.EVENT_RENAME_TABLE;
-      }
-    },
-    TRUNCATE {
-      @Override
-      DumpType dumpType() {
-        return DumpType.EVENT_TRUNCATE_TABLE;
-      }
-    };
-
-    abstract DumpType dumpType();
-  }
-
-  AlterTableHandler(NotificationEvent event) throws Exception {
-    super(event);
-    AlterTableMessage atm = deserializer.getAlterTableMessage(event.getMessage());
-    org.apache.hadoop.hive.metastore.api.Table before = atm.getTableObjBefore();
-    after = atm.getTableObjAfter();
-    isTruncateOp = atm.getIsTruncateOp();
-    scenario = scenarioType(before, after);
-  }
-
-  private Scenario scenarioType(org.apache.hadoop.hive.metastore.api.Table before,
-      org.apache.hadoop.hive.metastore.api.Table after) {
-    if (before.getDbName().equals(after.getDbName())
-        && before.getTableName().equals(after.getTableName())) {
-      return isTruncateOp ? Scenario.TRUNCATE : Scenario.ALTER;
-    } else {
-      return Scenario.RENAME;
-    }
-  }
-
-  @Override
-  public void handle(Context withinContext) throws Exception {
-    {
-      LOG.info("Processing#{} ALTER_TABLE message : {}", fromEventId(), event.getMessage());
-      if (Scenario.ALTER == scenario) {
-        withinContext.replicationSpec.setIsMetadataOnly(true);
-        Table qlMdTableAfter = new Table(after);
-        Path metaDataPath = new Path(withinContext.eventRoot, EximUtil.METADATA_NAME);
-        EximUtil.createExportDump(
-            metaDataPath.getFileSystem(withinContext.hiveConf),
-            metaDataPath,
-            qlMdTableAfter,
-            null,
-            withinContext.replicationSpec);
-      }
-      DumpMetaData dmd = withinContext.createDmd(this);
-      dmd.setPayload(event.getMessage());
-      dmd.write();
-    }
-  }
-
-  @Override
-  public DumpType dumpType() {
-    return scenario.dumpType();
-  }
-}

http://git-wip-us.apache.org/repos/asf/hive/blob/9d4f13af/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/events/CreateTableHandler.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/events/CreateTableHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/events/CreateTableHandler.java
deleted file mode 100644
index 88600fd..0000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/events/CreateTableHandler.java
+++ /dev/null
@@ -1,86 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hive.ql.parse.repl.events;
-
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hive.metastore.api.NotificationEvent;
-import org.apache.hadoop.hive.metastore.messaging.CreateTableMessage;
-import org.apache.hadoop.hive.ql.metadata.Table;
-import org.apache.hadoop.hive.ql.parse.EximUtil;
-
-import java.io.BufferedWriter;
-import java.io.IOException;
-import java.io.OutputStreamWriter;
-
-import org.apache.hadoop.hive.ql.parse.repl.DumpType;
-
-public class CreateTableHandler extends AbstractHandler {
-
-  CreateTableHandler(NotificationEvent event) {
-    super(event);
-  }
-
-  @Override
-  public void handle(Context withinContext) throws Exception {
-    CreateTableMessage ctm = deserializer.getCreateTableMessage(event.getMessage());
-    LOG.info("Processing#{} CREATE_TABLE message : {}", fromEventId(), event.getMessage());
-    org.apache.hadoop.hive.metastore.api.Table tobj = ctm.getTableObj();
-
-    if (tobj == null) {
-      LOG.debug("Event#{} was a CREATE_TABLE_EVENT with no table listed");
-      return;
-    }
-
-    Table qlMdTable = new Table(tobj);
-    if (qlMdTable.isView()) {
-      withinContext.replicationSpec.setIsMetadataOnly(true);
-    }
-
-    Path metaDataPath = new Path(withinContext.eventRoot, EximUtil.METADATA_NAME);
-    EximUtil.createExportDump(
-        metaDataPath.getFileSystem(withinContext.hiveConf),
-        metaDataPath,
-        qlMdTable,
-        null,
-        withinContext.replicationSpec);
-
-    Path dataPath = new Path(withinContext.eventRoot, "data");
-    Iterable<String> files = ctm.getFiles();
-    if (files != null) {
-      // encoded filename/checksum of files, write into _files
-      try (BufferedWriter fileListWriter = writer(withinContext, dataPath)) {
-        for (String file : files) {
-          fileListWriter.write(file + "\n");
-        }
-      }
-    }
-    withinContext.createDmd(this).write();
-  }
-
-  private BufferedWriter writer(Context withinContext, Path dataPath) throws IOException {
-    FileSystem fs = dataPath.getFileSystem(withinContext.hiveConf);
-    Path filesPath = new Path(dataPath, EximUtil.FILES_NAME);
-    return new BufferedWriter(new OutputStreamWriter(fs.create(filesPath)));
-  }
-
-  @Override
-  public DumpType dumpType() {
-    return DumpType.EVENT_CREATE_TABLE;
-  }
-}

http://git-wip-us.apache.org/repos/asf/hive/blob/9d4f13af/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/events/DefaultHandler.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/events/DefaultHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/events/DefaultHandler.java
deleted file mode 100644
index 78cd74f..0000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/events/DefaultHandler.java
+++ /dev/null
@@ -1,44 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hive.ql.parse.repl.events;
-
-import org.apache.hadoop.hive.metastore.api.NotificationEvent;
-
-import org.apache.hadoop.hive.ql.parse.repl.DumpType;
-
-import org.apache.hadoop.hive.ql.parse.repl.load.DumpMetaData;
-
-public class DefaultHandler extends AbstractHandler {
-
-  DefaultHandler(NotificationEvent event) {
-    super(event);
-  }
-
-  @Override
-  public void handle(Context withinContext) throws Exception {
-    LOG.info("Dummy processing#{} message : {}", fromEventId(), event.getMessage());
-    DumpMetaData dmd = withinContext.createDmd(this);
-    dmd.setPayload(event.getMessage());
-    dmd.write();
-  }
-
-  @Override
-  public DumpType dumpType() {
-    return DumpType.EVENT_UNKNOWN;
-  }
-}

http://git-wip-us.apache.org/repos/asf/hive/blob/9d4f13af/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/events/DropPartitionHandler.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/events/DropPartitionHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/events/DropPartitionHandler.java
deleted file mode 100644
index c4a0908..0000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/events/DropPartitionHandler.java
+++ /dev/null
@@ -1,44 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hive.ql.parse.repl.events;
-
-import org.apache.hadoop.hive.metastore.api.NotificationEvent;
-
-import org.apache.hadoop.hive.ql.parse.repl.DumpType;
-
-import org.apache.hadoop.hive.ql.parse.repl.load.DumpMetaData;
-
-public class DropPartitionHandler extends AbstractHandler {
-
-  DropPartitionHandler(NotificationEvent event) {
-    super(event);
-  }
-
-  @Override
-  public void handle(Context withinContext) throws Exception {
-    LOG.info("Processing#{} DROP_PARTITION message : {}", fromEventId(), event.getMessage());
-    DumpMetaData dmd = withinContext.createDmd(this);
-    dmd.setPayload(event.getMessage());
-    dmd.write();
-  }
-
-  @Override
-  public DumpType dumpType() {
-    return DumpType.EVENT_DROP_PARTITION;
-  }
-}

http://git-wip-us.apache.org/repos/asf/hive/blob/9d4f13af/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/events/DropTableHandler.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/events/DropTableHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/events/DropTableHandler.java
deleted file mode 100644
index e3addaf..0000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/events/DropTableHandler.java
+++ /dev/null
@@ -1,44 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hive.ql.parse.repl.events;
-
-import org.apache.hadoop.hive.metastore.api.NotificationEvent;
-
-import org.apache.hadoop.hive.ql.parse.repl.DumpType;
-
-import org.apache.hadoop.hive.ql.parse.repl.load.DumpMetaData;
-
-public class DropTableHandler extends AbstractHandler {
-
-  DropTableHandler(NotificationEvent event) {
-    super(event);
-  }
-
-  @Override
-  public void handle(Context withinContext) throws Exception {
-    LOG.info("Processing#{} DROP_TABLE message : {}", fromEventId(), event.getMessage());
-    DumpMetaData dmd = withinContext.createDmd(this);
-    dmd.setPayload(event.getMessage());
-    dmd.write();
-  }
-
-  @Override
-  public DumpType dumpType() {
-    return DumpType.EVENT_DROP_TABLE;
-  }
-}


[13/50] [abbrv] hive git commit: HIVE-16513 : width_bucket issues (Sahil Takiar via Ashutosh Chauhan)

Posted by we...@apache.org.
HIVE-16513 : width_bucket issues (Sahil Takiar via Ashutosh Chauhan)

Signed-off-by: Ashutosh Chauhan <ha...@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/f8f9155d
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/f8f9155d
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/f8f9155d

Branch: refs/heads/hive-14535
Commit: f8f9155dab346a2a112ba2bb4b9162b98cab404c
Parents: c6b5ad6
Author: Sahil Takiar <ta...@gmail.com>
Authored: Thu May 4 10:01:02 2017 -0700
Committer: Ashutosh Chauhan <ha...@apache.org>
Committed: Thu May 4 10:01:02 2017 -0700

----------------------------------------------------------------------
 .../ql/udf/generic/GenericUDFWidthBucket.java   | 279 ++++++++-
 .../queries/clientpositive/udf_width_bucket.q   | 175 ++++++
 .../clientpositive/udf_width_bucket.q.out       | 569 +++++++++++++++++++
 3 files changed, 995 insertions(+), 28 deletions(-)
----------------------------------------------------------------------
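
The diff below replaces the all-long arithmetic with per-type evaluate() overloads chosen from a common comparison type for expr, min_value and max_value. Every overload applies the same clamp-and-floor rule; as a standalone restatement of that rule for long inputs (the class name, sample values, and the main() driver are illustrative, not part of the patch):

    // Illustrative only -- not the UDF itself, and with no Hive dependencies.
    public class WidthBucketSketch {
      static int widthBucket(long expr, long min, long max, int numBuckets) {
        if (max > min) {                       // ascending range
          if (expr < min) return 0;
          if (expr >= max) return numBuckets + 1;
          return (int) Math.floor((numBuckets * (expr - min) / (max - min)) + 1);
        } else {                               // descending range
          if (expr > min) return 0;
          if (expr <= max) return numBuckets + 1;
          return (int) Math.floor((numBuckets * (min - expr) / (min - max)) + 1);
        }
      }

      public static void main(String[] args) {
        // Sample values over [0, 100) with 10 buckets:
        System.out.println(widthBucket(-5, 0, 100, 10));  // 0  (below the range)
        System.out.println(widthBucket(42, 0, 100, 10));  // 5  (floor(10*42/100) + 1)
        System.out.println(widthBucket(250, 0, 100, 10)); // 11 (>= max -> numBuckets + 1)
      }
    }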


http://git-wip-us.apache.org/repos/asf/hive/blob/f8f9155d/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFWidthBucket.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFWidthBucket.java b/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFWidthBucket.java
index 3ba24ed..bcf5c49 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFWidthBucket.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFWidthBucket.java
@@ -18,13 +18,25 @@
 package org.apache.hadoop.hive.ql.udf.generic;
 
 import com.google.common.base.Preconditions;
+
+import org.apache.hadoop.hive.common.type.HiveDecimal;
 import org.apache.hadoop.hive.ql.exec.Description;
+import org.apache.hadoop.hive.ql.exec.FunctionRegistry;
 import org.apache.hadoop.hive.ql.exec.UDFArgumentException;
 import org.apache.hadoop.hive.ql.metadata.HiveException;
+import org.apache.hadoop.hive.serde2.io.ByteWritable;
+import org.apache.hadoop.hive.serde2.io.DoubleWritable;
+import org.apache.hadoop.hive.serde2.io.HiveDecimalWritable;
+import org.apache.hadoop.hive.serde2.io.ShortWritable;
 import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
 import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorConverters;
 import org.apache.hadoop.hive.serde2.objectinspector.PrimitiveObjectInspector;
 import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory;
+import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorUtils;
+import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
+import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils;
+
+import org.apache.hadoop.io.FloatWritable;
 import org.apache.hadoop.io.IntWritable;
 import org.apache.hadoop.io.LongWritable;
 
@@ -33,22 +45,27 @@ import static org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveO
 
 
 @Description(name = "width_bucket",
-    value = "_FUNC_(expr, min_value, max_value, num_buckets) - Returns an integer between 0 and num_buckets+1 by "
-        + "mapping the expr into buckets defined by the range [min_value, max_value]",
-    extended = "Returns an integer between 0 and num_buckets+1 by "
-        + "mapping expr into the ith equally sized bucket. Buckets are made by dividing [min_value, max_value] into "
-        + "equally sized regions. If expr < min_value, return 1, if expr > max_value return num_buckets+1\n"
-        + "Example: expr is an integer column withs values 1, 10, 20, 30.\n"
-        + "  > SELECT _FUNC_(expr, 5, 25, 4) FROM src;\n1\n1\n3\n5")
+        value = "_FUNC_(expr, min_value, max_value, num_buckets) - Returns an integer between 0 and num_buckets+1 by "
+                + "mapping the expr into buckets defined by the range [min_value, max_value]",
+        extended = "Returns an integer between 0 and num_buckets+1 by "
+                + "mapping expr into the ith equally sized bucket. Buckets are made by dividing [min_value, max_value] into "
+                + "equally sized regions. If expr < min_value, return 1, if expr > max_value return num_buckets+1\n"
+                + "Example: expr is an integer column withs values 1, 10, 20, 30.\n"
+                + "  > SELECT _FUNC_(expr, 5, 25, 4) FROM src;\n1\n1\n3\n5")
 public class GenericUDFWidthBucket extends GenericUDF {
 
-  private transient PrimitiveObjectInspector.PrimitiveCategory[] inputTypes = new PrimitiveObjectInspector.PrimitiveCategory[4];
-  private transient ObjectInspectorConverters.Converter[] converters = new ObjectInspectorConverters.Converter[4];
+  private transient ObjectInspector[] objectInspectors;
+  private transient ObjectInspector commonExprMinMaxOI;
+  private transient ObjectInspectorConverters.Converter epxrConverterOI;
+  private transient ObjectInspectorConverters.Converter minValueConverterOI;
+  private transient ObjectInspectorConverters.Converter maxValueConverterOI;
 
   private final IntWritable output = new IntWritable();
 
   @Override
   public ObjectInspector initialize(ObjectInspector[] arguments) throws UDFArgumentException {
+    this.objectInspectors = arguments;
+
     checkArgsSize(arguments, 4, 4);
 
     checkArgPrimitive(arguments, 0);
@@ -56,43 +73,249 @@ public class GenericUDFWidthBucket extends GenericUDF {
     checkArgPrimitive(arguments, 2);
     checkArgPrimitive(arguments, 3);
 
+    PrimitiveObjectInspector.PrimitiveCategory[] inputTypes = new PrimitiveObjectInspector.PrimitiveCategory[4];
     checkArgGroups(arguments, 0, inputTypes, NUMERIC_GROUP, VOID_GROUP);
     checkArgGroups(arguments, 1, inputTypes, NUMERIC_GROUP, VOID_GROUP);
     checkArgGroups(arguments, 2, inputTypes, NUMERIC_GROUP, VOID_GROUP);
     checkArgGroups(arguments, 3, inputTypes, NUMERIC_GROUP, VOID_GROUP);
 
-    obtainLongConverter(arguments, 0, inputTypes, converters);
-    obtainLongConverter(arguments, 1, inputTypes, converters);
-    obtainLongConverter(arguments, 2, inputTypes, converters);
-    obtainIntConverter(arguments, 3, inputTypes, converters);
+    TypeInfo exprTypeInfo = TypeInfoUtils.getTypeInfoFromObjectInspector(this.objectInspectors[0]);
+    TypeInfo minValueTypeInfo = TypeInfoUtils.getTypeInfoFromObjectInspector(this.objectInspectors[1]);
+    TypeInfo maxValueTypeInfo = TypeInfoUtils.getTypeInfoFromObjectInspector(this.objectInspectors[2]);
+
+    TypeInfo commonExprMinMaxTypeInfo = FunctionRegistry.getCommonClassForComparison(exprTypeInfo,
+            FunctionRegistry.getCommonClassForComparison(minValueTypeInfo, maxValueTypeInfo));
+
+    this.commonExprMinMaxOI = TypeInfoUtils.getStandardWritableObjectInspectorFromTypeInfo(commonExprMinMaxTypeInfo);
+
+    this.epxrConverterOI = ObjectInspectorConverters.getConverter(this.objectInspectors[0], this.commonExprMinMaxOI);
+    this.minValueConverterOI = ObjectInspectorConverters.getConverter(this.objectInspectors[1], this.commonExprMinMaxOI);
+    this.maxValueConverterOI = ObjectInspectorConverters.getConverter(this.objectInspectors[2], this.commonExprMinMaxOI);
 
     return PrimitiveObjectInspectorFactory.writableIntObjectInspector;
   }
 
   @Override
   public Object evaluate(DeferredObject[] arguments) throws HiveException {
-    Long exprValue = getLongValue(arguments, 0, converters);
-    Long minValue = getLongValue(arguments, 1, converters);
-    Long maxValue = getLongValue(arguments, 2, converters);
-    Integer numBuckets = getIntValue(arguments, 3, converters);
-
-    if (exprValue == null || minValue == null || maxValue == null || numBuckets == null) {
+    if (arguments[0].get() == null || arguments[1].get() == null || arguments[2].get() == null || arguments[3].get() == null) {
       return null;
     }
 
+    Object exprValue = this.epxrConverterOI.convert(arguments[0].get());
+    Object minValue = this.minValueConverterOI.convert(arguments[1].get());
+    Object maxValue = this.maxValueConverterOI.convert(arguments[2].get());
+
+    int numBuckets = PrimitiveObjectInspectorUtils.getInt(arguments[3].get(),
+            (PrimitiveObjectInspector) this.objectInspectors[3]);
+
+    switch (((PrimitiveObjectInspector) this.commonExprMinMaxOI).getPrimitiveCategory()) {
+      case SHORT:
+        return evaluate(((ShortWritable) exprValue).get(), ((ShortWritable) minValue).get(),
+                ((ShortWritable) maxValue).get(), numBuckets);
+      case INT:
+        return evaluate(((IntWritable) exprValue).get(), ((IntWritable) minValue).get(),
+                ((IntWritable) maxValue).get(), numBuckets);
+      case LONG:
+        return evaluate(((LongWritable) exprValue).get(), ((LongWritable) minValue).get(),
+                ((LongWritable) maxValue).get(), numBuckets);
+      case FLOAT:
+        return evaluate(((FloatWritable) exprValue).get(), ((FloatWritable) minValue).get(),
+                ((FloatWritable) maxValue).get(), numBuckets);
+      case DOUBLE:
+        return evaluate(((DoubleWritable) exprValue).get(), ((DoubleWritable) minValue).get(),
+                ((DoubleWritable) maxValue).get(), numBuckets);
+      case DECIMAL:
+        return evaluate(((HiveDecimalWritable) exprValue).getHiveDecimal(),
+                ((HiveDecimalWritable) minValue).getHiveDecimal(), ((HiveDecimalWritable) maxValue).getHiveDecimal(),
+                numBuckets);
+      case BYTE:
+        return evaluate(((ByteWritable) exprValue).get(), ((ByteWritable) minValue).get(),
+                ((ByteWritable) maxValue).get(), numBuckets);
+      default:
+        throw new IllegalStateException(
+                "Error: width_bucket could not determine a common primitive type for all inputs");
+    }
+  }
+
+  private IntWritable evaluate(short exprValue, short minValue, short maxValue, int numBuckets) {
+
+    Preconditions.checkArgument(numBuckets > 0, "numBuckets in width_bucket function must be above 0");
+    Preconditions.checkArgument(maxValue != minValue, "maxValue cannot be equal to minValue in width_bucket function");
+
+    if (maxValue > minValue) {
+      if (exprValue < minValue) {
+        output.set(0);
+      } else if (exprValue >= maxValue) {
+        output.set(numBuckets + 1);
+      } else {
+        output.set((int) Math.floor((numBuckets * (exprValue - minValue) / (maxValue - minValue)) + 1));
+      }
+    } else {
+      if (exprValue > minValue) {
+        output.set(0);
+      } else if (exprValue <= maxValue) {
+        output.set(numBuckets + 1);
+      } else {
+        output.set((int) Math.floor((numBuckets * (minValue - exprValue) / (minValue - maxValue)) + 1));
+      }
+    }
+
+    return output;
+  }
+
+  private IntWritable evaluate(int exprValue, int minValue, int maxValue, int numBuckets) {
+
+    Preconditions.checkArgument(numBuckets > 0, "numBuckets in width_bucket function must be above 0");
+    Preconditions.checkArgument(maxValue != minValue, "maxValue cannot be equal to minValue in width_bucket function");
+
+    if (maxValue > minValue) {
+      if (exprValue < minValue) {
+        output.set(0);
+      } else if (exprValue >= maxValue) {
+        output.set(numBuckets + 1);
+      } else {
+        output.set((int) Math.floor((numBuckets * (exprValue - minValue) / (maxValue - minValue)) + 1));
+      }
+    } else {
+      if (exprValue > minValue) {
+        output.set(0);
+      } else if (exprValue <= maxValue) {
+        output.set(numBuckets + 1);
+      } else {
+        output.set((int) Math.floor((numBuckets * (minValue - exprValue) / (minValue - maxValue)) + 1));
+      }
+    }
+
+    return output;
+  }
+
+  private IntWritable evaluate(long exprValue, long minValue, long maxValue, int numBuckets) {
+
     Preconditions.checkArgument(numBuckets > 0, "numBuckets in width_bucket function must be above 0");
-    long intervalSize = (maxValue - minValue) / numBuckets;
+    Preconditions.checkArgument(maxValue != minValue, "maxValue cannot be equal to minValue in width_bucket function");
+
+    if (maxValue > minValue) {
+      if (exprValue < minValue) {
+        output.set(0);
+      } else if (exprValue >= maxValue) {
+        output.set(numBuckets + 1);
+      } else {
+        output.set((int) Math.floor((numBuckets * (exprValue - minValue) / (maxValue - minValue)) + 1));
+      }
+    } else {
+      if (exprValue > minValue) {
+        output.set(0);
+      } else if (exprValue <= maxValue) {
+        output.set(numBuckets + 1);
+      } else {
+        output.set((int) Math.floor((numBuckets * (minValue - exprValue) / (minValue - maxValue)) + 1));
+      }
+    }
 
-    if (exprValue < minValue) {
-      output.set(0);
-    } else if (exprValue > maxValue) {
-      output.set(numBuckets + 1);
+    return output;
+  }
+
+  private IntWritable evaluate(float exprValue, float minValue, float maxValue, int numBuckets) {
+
+    Preconditions.checkArgument(numBuckets > 0, "numBuckets in width_bucket function must be above 0");
+    Preconditions.checkArgument(maxValue != minValue, "maxValue cannot be equal to minValue in width_bucket function");
+
+    if (maxValue > minValue) {
+      if (exprValue < minValue) {
+        output.set(0);
+      } else if (exprValue >= maxValue) {
+        output.set(numBuckets + 1);
+      } else {
+        output.set((int) Math.floor((numBuckets * (exprValue - minValue) / (maxValue - minValue)) + 1));
+      }
+    } else {
+      if (exprValue > minValue) {
+        output.set(0);
+      } else if (exprValue <= maxValue) {
+        output.set(numBuckets + 1);
+      } else {
+        output.set((int) Math.floor((numBuckets * (minValue - exprValue) / (minValue - maxValue)) + 1));
+      }
+    }
+
+    return output;
+  }
+
+  private IntWritable evaluate(double exprValue, double minValue, double maxValue, int numBuckets) {
+
+    Preconditions.checkArgument(numBuckets > 0, "numBuckets in width_bucket function must be above 0");
+    Preconditions.checkArgument(maxValue != minValue, "maxValue cannot be equal to minValue in width_bucket function");
+
+    if (maxValue > minValue) {
+      if (exprValue < minValue) {
+        output.set(0);
+      } else if (exprValue >= maxValue) {
+        output.set(numBuckets + 1);
+      } else {
+        output.set((int) Math.floor((numBuckets * (exprValue - minValue) / (maxValue - minValue)) + 1));
+      }
+    } else {
+      if (exprValue > minValue) {
+        output.set(0);
+      } else if (exprValue <= maxValue) {
+        output.set(numBuckets + 1);
+      } else {
+        output.set((int) Math.floor((numBuckets * (minValue - exprValue) / (minValue - maxValue)) + 1));
+      }
+    }
+
+    return output;
+  }
+
+  private IntWritable evaluate(HiveDecimal exprValue, HiveDecimal minValue, HiveDecimal maxValue,
+                                      int numBuckets) {
+
+    Preconditions.checkArgument(numBuckets > 0, "numBuckets in width_bucket function must be above 0");
+    Preconditions.checkArgument(!maxValue.equals(minValue),
+            "maxValue cannot be equal to minValue in width_bucket function");
+
+    if (maxValue.compareTo(minValue) > 0) {
+      if (exprValue.compareTo(minValue) < 0) {
+        output.set(0);
+      } else if (exprValue.compareTo(maxValue) >= 0) {
+        output.set(numBuckets + 1);
+      } else {
+        output.set(HiveDecimal.create(numBuckets).multiply(exprValue.subtract(minValue)).divide(
+                maxValue.subtract(minValue)).add(HiveDecimal.ONE).intValue());
+      }
+    } else {
+      if (exprValue.compareTo(minValue) > 0) {
+        output.set(0);
+      } else if (exprValue.compareTo(maxValue) <= 0) {
+        output.set(numBuckets + 1);
+      } else {
+        output.set(HiveDecimal.create(numBuckets).multiply(minValue.subtract(exprValue)).divide(
+                minValue.subtract(maxValue)).add(HiveDecimal.ONE).intValue());
+      }
+    }
+
+    return output;
+  }
+
+  private IntWritable evaluate(byte exprValue, byte minValue, byte maxValue, int numBuckets) {
+
+    Preconditions.checkArgument(numBuckets > 0, "numBuckets in width_bucket function must be above 0");
+    Preconditions.checkArgument(maxValue != minValue, "maxValue cannot be equal to minValue in width_bucket function");
+
+    if (maxValue > minValue) {
+      if (exprValue < minValue) {
+        output.set(0);
+      } else if (exprValue >= maxValue) {
+        output.set(numBuckets + 1);
+      } else {
+        output.set((int) Math.floor((numBuckets * (exprValue - minValue) / (maxValue - minValue)) + 1));
+      }
     } else {
-      long diff = exprValue - minValue;
-      if (diff % intervalSize == 0) {
-        output.set((int) (diff/intervalSize + 1));
+      if (exprValue > minValue) {
+        output.set(0);
+      } else if (exprValue <= maxValue) {
+        output.set(numBuckets + 1);
       } else {
-        output.set((int) Math.ceil((double) (diff) / intervalSize));
+        output.set((int) Math.floor((numBuckets * (minValue - exprValue) / (minValue - maxValue)) + 1));
       }
     }
 

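(Editorial note, not part of the patch.) Each evaluate() overload above applies the same rule: values below the range map to bucket 0, values at or past the far bound map to numBuckets + 1, and everything in between lands in floor(numBuckets * (exprValue - minValue) / (maxValue - minValue)) + 1, mirrored when minValue > maxValue. A minimal standalone Java sketch of that rule, using plain doubles in place of the Writable wrappers and example inputs taken from the test queries below:

// Editorial sketch only -- not part of the patch. Plain doubles stand in for the
// Writable wrappers used by the evaluate() overloads above.
public class WidthBucketSketch {
  static int widthBucket(double expr, double min, double max, int numBuckets) {
    if (numBuckets <= 0 || min == max) {
      throw new IllegalArgumentException("numBuckets must be > 0 and min != max");
    }
    if (max > min) {                       // ascending range
      if (expr < min) return 0;
      if (expr >= max) return numBuckets + 1;
      return (int) Math.floor(numBuckets * (expr - min) / (max - min)) + 1;
    } else {                               // descending range (min > max)
      if (expr > min) return 0;
      if (expr <= max) return numBuckets + 1;
      return (int) Math.floor(numBuckets * (min - expr) / (min - max)) + 1;
    }
  }

  public static void main(String[] args) {
    // These match the expected rows in udf_width_bucket.q.out further down.
    System.out.println(widthBucket(10, 5, 25, 4));        // 2
    System.out.println(widthBucket(0.654321, 0, 1, 10));  // 7
    System.out.println(widthBucket(30, 25, 5, 4));        // 0 (reversed bounds)
  }
}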
http://git-wip-us.apache.org/repos/asf/hive/blob/f8f9155d/ql/src/test/queries/clientpositive/udf_width_bucket.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/udf_width_bucket.q b/ql/src/test/queries/clientpositive/udf_width_bucket.q
index 6ac60d6..dfdbee1 100644
--- a/ql/src/test/queries/clientpositive/udf_width_bucket.q
+++ b/ql/src/test/queries/clientpositive/udf_width_bucket.q
@@ -3,12 +3,14 @@ desc function extended width_bucket;
 
 explain select width_bucket(10, 5, 25, 4);
 
+-- Test with constants
 select
 width_bucket(1, 5, 25, 4),
 width_bucket(10, 5, 25, 4),
 width_bucket(20, 5, 25, 4),
 width_bucket(30, 5, 25, 4);
 
+-- Test with NULLs
 select
 width_bucket(1, NULL, 25, 4),
 width_bucket(NULL, 5, 25, 4),
@@ -16,14 +18,187 @@ width_bucket(20, 5, NULL, 4),
 width_bucket(30, 5, 25, NULL),
 width_bucket(NULL, NULL, NULL, NULL);
 
+-- Test with negative values
 select
 width_bucket(-1, -25, -5, 4),
 width_bucket(-10, -25, -5, 4),
 width_bucket(-20, -25, -5, 4),
 width_bucket(-30, -25, -5, 4);
 
+-- Test with positive and negative values
 select
 width_bucket(-10, -5, 15, 4),
 width_bucket(0, -5, 15, 4),
 width_bucket(10, -5, 15, 4),
 width_bucket(20, -5, 15, 4);
+
+-- Test with decimals
+select
+width_bucket(0.1, 0, 1, 10),
+width_bucket(0.25, 0, 1, 10),
+width_bucket(0.3456, 0, 1, 10),
+width_bucket(0.654321, 0, 1, 10);
+
+-- Test with negative decimals
+select
+width_bucket(-0.5, -1.5, 1.5, 10),
+width_bucket(-0.3, -1.5, 1.5, 10),
+width_bucket(-0.25, -1.5, 1.5, 10),
+width_bucket(0, -1.5, 1.5, 10),
+width_bucket(0.75, -1.5, 1.5, 10),
+width_bucket(1.25, -1.5, 1.5, 10),
+width_bucket(1.5, -1.5, 1.5, 10);
+
+-- Test with minValue > maxValue
+select
+width_bucket(1, 25, 5, 4),
+width_bucket(10, 25, 5, 4),
+width_bucket(20, 25, 5, 4),
+width_bucket(30, 25, 5, 4);
+
+-- Test with minValue > maxValue, with positive and negative values
+select
+width_bucket(-10, 15, -5, 4),
+width_bucket(0, 15, -5, 4),
+width_bucket(10, 15, -5, 4),
+width_bucket(20, 15, -5, 4);
+
+-- Test with minValue > maxValue, with decimals
+select
+width_bucket(0.1, 1, 0, 10),
+width_bucket(0.25, 1, 0, 10),
+width_bucket(0.3456, 1, 0, 10),
+width_bucket(0.654321, 1, 0, 10);
+
+-- Test with small decimal values
+create table alldecimaltypes(
+    cfloat FLOAT,
+    cdouble DOUBLE);
+
+insert into table alldecimaltypes values (0.1, 0.1), (0.25, 0.25), (0.3456, 0.3456), (0.654321, 0.654321);
+
+select
+width_bucket(cfloat, 0, 1, 10),
+width_bucket(cdouble, 0, 1, 10)
+from alldecimaltypes;
+
+select
+width_bucket(cfloat, 0, 1.5, 10),
+width_bucket(cdouble, -1.5, 0, 10),
+width_bucket(0.25, cfloat, 2, 10),
+width_bucket(0.25, 0, cdouble, 10)
+from alldecimaltypes;
+
+-- Test with all numeric types
+create table alltypes(
+    ctinyint TINYINT,
+    csmallint SMALLINT,
+    cint INT,
+    cbigint BIGINT,
+    cfloat FLOAT,
+    cdouble DOUBLE);
+
+insert into table alltypes values
+(0, 0, 0, 0, 0.0, 0.0),
+(1, 1, 1, 1, 1.0, 1.0),
+(25, 25, 25, 25, 25.0, 25.0),
+(60, 60, 60, 60, 60.0, 60.0),
+(72, 72, 72, 72, 72.0, 72.0),
+(100, 100, 100, 100, 100.0, 100.0);
+
+-- Test each numeric type individually
+select
+width_bucket(ctinyint, 0, 100, 10),
+width_bucket(csmallint, 0, 100, 10),
+width_bucket(cint, 0, 100, 10),
+width_bucket(cbigint, 0, 100, 10),
+width_bucket(cfloat, 0, 100, 10),
+width_bucket(cdouble, 0, 100, 10)
+from alltypes;
+
+truncate table alltypes;
+
+insert into table alltypes values (5, 5, 5, 10, 4.5, 7.25);
+
+-- Test different numeric types in a single query
+select
+width_bucket(cdouble, ctinyint, cbigint, 10),
+width_bucket(cdouble, csmallint, cbigint, 10),
+width_bucket(cdouble, cint, cbigint, 10),
+width_bucket(cdouble, cfloat, cbigint, 10)
+from alltypes;
+
+-- Test all tinyints
+create table alltinyints (
+    ctinyint1 TINYINT,
+    ctinyint2 TINYINT,
+    ctinyint3 TINYINT,
+    cint INT);
+
+insert into table alltinyints values (5, 1, 10, 2);
+select width_bucket(ctinyint1, ctinyint2, ctinyint3, cint) from alltinyints;
+
+-- Test all smallints
+create table allsmallints (
+    csmallint1 SMALLINT,
+    csmallint2 SMALLINT,
+    csmallint3 SMALLINT,
+    cint INT);
+
+insert into table allsmallints values (5, 1, 10, 2);
+select width_bucket(csmallint1, csmallint2, csmallint3, cint) from allsmallints;
+
+-- Test all ints
+create table allints (
+    cint1 INT,
+    cint2 INT,
+    cint3 INT,
+    cint4 INT);
+
+insert into table allints values (5, 1, 10, 2);
+select width_bucket(cint1, cint2, cint3, cint4) from allints;
+
+-- Test all bigints
+create table allbigints (
+    cbigint1 BIGINT,
+    cbigint2 BIGINT,
+    cbigint3 BIGINT,
+    cint INT);
+
+insert into table allbigints values (5, 1, 10, 2);
+select width_bucket(cbigint1, cbigint2, cbigint3, cint) from allbigints;
+
+-- Test all floats
+create table allfloats (
+    cfloat1 FLOAT,
+    cfloat2 FLOAT,
+    cfloat3 FLOAT,
+    cint INT);
+
+insert into table allfloats values (5.0, 1.0, 10.0, 2);
+select width_bucket(cfloat1, cfloat2, cfloat3, cint) from allfloats;
+
+-- Test all doubles
+create table alldoubles (
+    cdouble1 DOUBLE,
+    cdouble2 DOUBLE,
+    cdouble3 DOUBLE,
+    cint INT);
+
+insert into table alldoubles values (5.0, 1.0, 10.0, 2);
+select width_bucket(cdouble1, cdouble2, cdouble3, cint) from alldoubles;
+
+-- Test with grouping sets
+create table testgroupingsets (c1 int, c2 int);
+insert into table testgroupingsets values (1, 1), (2, 2);
+select c1, c2, width_bucket(5, c1, 10, case when grouping(c2) = 0 then 10 else 5 end) from testgroupingsets group by cube(c1, c2);
+
+drop table alldecimaltypes;
+drop table alltypes;
+drop table alltinyints;
+drop table allsmallints;
+drop table allints;
+drop table allbigints;
+drop table allfloats;
+drop table alldoubles;
+drop table testgroupingsets;

http://git-wip-us.apache.org/repos/asf/hive/blob/f8f9155d/ql/src/test/results/clientpositive/udf_width_bucket.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/udf_width_bucket.q.out b/ql/src/test/results/clientpositive/udf_width_bucket.q.out
index a72e977..6879631 100644
--- a/ql/src/test/results/clientpositive/udf_width_bucket.q.out
+++ b/ql/src/test/results/clientpositive/udf_width_bucket.q.out
@@ -109,3 +109,572 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: _dummy_database@_dummy_table
 #### A masked pattern was here ####
 0	2	4	5
+PREHOOK: query: select
+width_bucket(0.1, 0, 1, 10),
+width_bucket(0.25, 0, 1, 10),
+width_bucket(0.3456, 0, 1, 10),
+width_bucket(0.654321, 0, 1, 10)
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+#### A masked pattern was here ####
+POSTHOOK: query: select
+width_bucket(0.1, 0, 1, 10),
+width_bucket(0.25, 0, 1, 10),
+width_bucket(0.3456, 0, 1, 10),
+width_bucket(0.654321, 0, 1, 10)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+#### A masked pattern was here ####
+2	3	4	7
+PREHOOK: query: select
+width_bucket(-0.5, -1.5, 1.5, 10),
+width_bucket(-0.3, -1.5, 1.5, 10),
+width_bucket(-0.25, -1.5, 1.5, 10),
+width_bucket(0, -1.5, 1.5, 10),
+width_bucket(0.75, -1.5, 1.5, 10),
+width_bucket(1.25, -1.5, 1.5, 10),
+width_bucket(1.5, -1.5, 1.5, 10)
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+#### A masked pattern was here ####
+POSTHOOK: query: select
+width_bucket(-0.5, -1.5, 1.5, 10),
+width_bucket(-0.3, -1.5, 1.5, 10),
+width_bucket(-0.25, -1.5, 1.5, 10),
+width_bucket(0, -1.5, 1.5, 10),
+width_bucket(0.75, -1.5, 1.5, 10),
+width_bucket(1.25, -1.5, 1.5, 10),
+width_bucket(1.5, -1.5, 1.5, 10)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+#### A masked pattern was here ####
+4	5	5	6	8	10	11
+PREHOOK: query: select
+width_bucket(1, 25, 5, 4),
+width_bucket(10, 25, 5, 4),
+width_bucket(20, 25, 5, 4),
+width_bucket(30, 25, 5, 4)
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+#### A masked pattern was here ####
+POSTHOOK: query: select
+width_bucket(1, 25, 5, 4),
+width_bucket(10, 25, 5, 4),
+width_bucket(20, 25, 5, 4),
+width_bucket(30, 25, 5, 4)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+#### A masked pattern was here ####
+5	4	2	0
+PREHOOK: query: select
+width_bucket(-10, 15, -5, 4),
+width_bucket(0, 15, -5, 4),
+width_bucket(10, 15, -5, 4),
+width_bucket(20, 15, -5, 4)
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+#### A masked pattern was here ####
+POSTHOOK: query: select
+width_bucket(-10, 15, -5, 4),
+width_bucket(0, 15, -5, 4),
+width_bucket(10, 15, -5, 4),
+width_bucket(20, 15, -5, 4)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+#### A masked pattern was here ####
+5	4	2	0
+PREHOOK: query: select
+width_bucket(0.1, 1, 0, 10),
+width_bucket(0.25, 1, 0, 10),
+width_bucket(0.3456, 1, 0, 10),
+width_bucket(0.654321, 1, 0, 10)
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+#### A masked pattern was here ####
+POSTHOOK: query: select
+width_bucket(0.1, 1, 0, 10),
+width_bucket(0.25, 1, 0, 10),
+width_bucket(0.3456, 1, 0, 10),
+width_bucket(0.654321, 1, 0, 10)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+#### A masked pattern was here ####
+10	8	7	4
+PREHOOK: query: create table alldecimaltypes(
+    cfloat FLOAT,
+    cdouble DOUBLE)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@alldecimaltypes
+POSTHOOK: query: create table alldecimaltypes(
+    cfloat FLOAT,
+    cdouble DOUBLE)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@alldecimaltypes
+PREHOOK: query: insert into table alldecimaltypes values (0.1, 0.1), (0.25, 0.25), (0.3456, 0.3456), (0.654321, 0.654321)
+PREHOOK: type: QUERY
+PREHOOK: Output: default@alldecimaltypes
+POSTHOOK: query: insert into table alldecimaltypes values (0.1, 0.1), (0.25, 0.25), (0.3456, 0.3456), (0.654321, 0.654321)
+POSTHOOK: type: QUERY
+POSTHOOK: Output: default@alldecimaltypes
+POSTHOOK: Lineage: alldecimaltypes.cdouble EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col2, type:string, comment:), ]
+POSTHOOK: Lineage: alldecimaltypes.cfloat EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
+PREHOOK: query: select
+width_bucket(cfloat, 0, 1, 10),
+width_bucket(cdouble, 0, 1, 10)
+from alldecimaltypes
+PREHOOK: type: QUERY
+PREHOOK: Input: default@alldecimaltypes
+#### A masked pattern was here ####
+POSTHOOK: query: select
+width_bucket(cfloat, 0, 1, 10),
+width_bucket(cdouble, 0, 1, 10)
+from alldecimaltypes
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@alldecimaltypes
+#### A masked pattern was here ####
+2	2
+3	3
+4	4
+7	7
+PREHOOK: query: select
+width_bucket(cfloat, 0, 1.5, 10),
+width_bucket(cdouble, -1.5, 0, 10),
+width_bucket(0.25, cfloat, 2, 10),
+width_bucket(0.25, 0, cdouble, 10)
+from alldecimaltypes
+PREHOOK: type: QUERY
+PREHOOK: Input: default@alldecimaltypes
+#### A masked pattern was here ####
+POSTHOOK: query: select
+width_bucket(cfloat, 0, 1.5, 10),
+width_bucket(cdouble, -1.5, 0, 10),
+width_bucket(0.25, cfloat, 2, 10),
+width_bucket(0.25, 0, cdouble, 10)
+from alldecimaltypes
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@alldecimaltypes
+#### A masked pattern was here ####
+1	11	1	11
+2	11	1	11
+3	11	0	8
+5	11	0	4
+PREHOOK: query: create table alltypes(
+    ctinyint TINYINT,
+    csmallint SMALLINT,
+    cint INT,
+    cbigint BIGINT,
+    cfloat FLOAT,
+    cdouble DOUBLE)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@alltypes
+POSTHOOK: query: create table alltypes(
+    ctinyint TINYINT,
+    csmallint SMALLINT,
+    cint INT,
+    cbigint BIGINT,
+    cfloat FLOAT,
+    cdouble DOUBLE)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@alltypes
+PREHOOK: query: insert into table alltypes values
+(0, 0, 0, 0, 0.0, 0.0),
+(1, 1, 1, 1, 1.0, 1.0),
+(25, 25, 25, 25, 25.0, 25.0),
+(60, 60, 60, 60, 60.0, 60.0),
+(72, 72, 72, 72, 72.0, 72.0),
+(100, 100, 100, 100, 100.0, 100.0)
+PREHOOK: type: QUERY
+PREHOOK: Output: default@alltypes
+POSTHOOK: query: insert into table alltypes values
+(0, 0, 0, 0, 0.0, 0.0),
+(1, 1, 1, 1, 1.0, 1.0),
+(25, 25, 25, 25, 25.0, 25.0),
+(60, 60, 60, 60, 60.0, 60.0),
+(72, 72, 72, 72, 72.0, 72.0),
+(100, 100, 100, 100, 100.0, 100.0)
+POSTHOOK: type: QUERY
+POSTHOOK: Output: default@alltypes
+POSTHOOK: Lineage: alltypes.cbigint EXPRESSION [(values__tmp__table__2)values__tmp__table__2.FieldSchema(name:tmp_values_col4, type:string, comment:), ]
+POSTHOOK: Lineage: alltypes.cdouble EXPRESSION [(values__tmp__table__2)values__tmp__table__2.FieldSchema(name:tmp_values_col6, type:string, comment:), ]
+POSTHOOK: Lineage: alltypes.cfloat EXPRESSION [(values__tmp__table__2)values__tmp__table__2.FieldSchema(name:tmp_values_col5, type:string, comment:), ]
+POSTHOOK: Lineage: alltypes.cint EXPRESSION [(values__tmp__table__2)values__tmp__table__2.FieldSchema(name:tmp_values_col3, type:string, comment:), ]
+POSTHOOK: Lineage: alltypes.csmallint EXPRESSION [(values__tmp__table__2)values__tmp__table__2.FieldSchema(name:tmp_values_col2, type:string, comment:), ]
+POSTHOOK: Lineage: alltypes.ctinyint EXPRESSION [(values__tmp__table__2)values__tmp__table__2.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
+PREHOOK: query: select
+width_bucket(ctinyint, 0, 100, 10),
+width_bucket(csmallint, 0, 100, 10),
+width_bucket(cint, 0, 100, 10),
+width_bucket(cbigint, 0, 100, 10),
+width_bucket(cfloat, 0, 100, 10),
+width_bucket(cdouble, 0, 100, 10)
+from alltypes
+PREHOOK: type: QUERY
+PREHOOK: Input: default@alltypes
+#### A masked pattern was here ####
+POSTHOOK: query: select
+width_bucket(ctinyint, 0, 100, 10),
+width_bucket(csmallint, 0, 100, 10),
+width_bucket(cint, 0, 100, 10),
+width_bucket(cbigint, 0, 100, 10),
+width_bucket(cfloat, 0, 100, 10),
+width_bucket(cdouble, 0, 100, 10)
+from alltypes
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@alltypes
+#### A masked pattern was here ####
+1	1	1	1	1	1
+1	1	1	1	1	1
+3	3	3	3	3	3
+7	7	7	7	7	7
+8	8	8	8	8	8
+11	11	11	11	11	11
+PREHOOK: query: truncate table alltypes
+PREHOOK: type: TRUNCATETABLE
+PREHOOK: Output: default@alltypes
+POSTHOOK: query: truncate table alltypes
+POSTHOOK: type: TRUNCATETABLE
+POSTHOOK: Output: default@alltypes
+PREHOOK: query: insert into table alltypes values (5, 5, 5, 10, 4.5, 7.25)
+PREHOOK: type: QUERY
+PREHOOK: Output: default@alltypes
+POSTHOOK: query: insert into table alltypes values (5, 5, 5, 10, 4.5, 7.25)
+POSTHOOK: type: QUERY
+POSTHOOK: Output: default@alltypes
+POSTHOOK: Lineage: alltypes.cbigint EXPRESSION [(values__tmp__table__3)values__tmp__table__3.FieldSchema(name:tmp_values_col4, type:string, comment:), ]
+POSTHOOK: Lineage: alltypes.cdouble EXPRESSION [(values__tmp__table__3)values__tmp__table__3.FieldSchema(name:tmp_values_col6, type:string, comment:), ]
+POSTHOOK: Lineage: alltypes.cfloat EXPRESSION [(values__tmp__table__3)values__tmp__table__3.FieldSchema(name:tmp_values_col5, type:string, comment:), ]
+POSTHOOK: Lineage: alltypes.cint EXPRESSION [(values__tmp__table__3)values__tmp__table__3.FieldSchema(name:tmp_values_col3, type:string, comment:), ]
+POSTHOOK: Lineage: alltypes.csmallint EXPRESSION [(values__tmp__table__3)values__tmp__table__3.FieldSchema(name:tmp_values_col2, type:string, comment:), ]
+POSTHOOK: Lineage: alltypes.ctinyint EXPRESSION [(values__tmp__table__3)values__tmp__table__3.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
+PREHOOK: query: select
+width_bucket(cdouble, ctinyint, cbigint, 10),
+width_bucket(cdouble, csmallint, cbigint, 10),
+width_bucket(cdouble, cint, cbigint, 10),
+width_bucket(cdouble, cfloat, cbigint, 10)
+from alltypes
+PREHOOK: type: QUERY
+PREHOOK: Input: default@alltypes
+#### A masked pattern was here ####
+POSTHOOK: query: select
+width_bucket(cdouble, ctinyint, cbigint, 10),
+width_bucket(cdouble, csmallint, cbigint, 10),
+width_bucket(cdouble, cint, cbigint, 10),
+width_bucket(cdouble, cfloat, cbigint, 10)
+from alltypes
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@alltypes
+#### A masked pattern was here ####
+5	5	5	6
+PREHOOK: query: create table alltinyints (
+    ctinyint1 TINYINT,
+    ctinyint2 TINYINT,
+    ctinyint3 TINYINT,
+    cint INT)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@alltinyints
+POSTHOOK: query: create table alltinyints (
+    ctinyint1 TINYINT,
+    ctinyint2 TINYINT,
+    ctinyint3 TINYINT,
+    cint INT)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@alltinyints
+PREHOOK: query: insert into table alltinyints values (5, 1, 10, 2)
+PREHOOK: type: QUERY
+PREHOOK: Output: default@alltinyints
+POSTHOOK: query: insert into table alltinyints values (5, 1, 10, 2)
+POSTHOOK: type: QUERY
+POSTHOOK: Output: default@alltinyints
+POSTHOOK: Lineage: alltinyints.cint EXPRESSION [(values__tmp__table__4)values__tmp__table__4.FieldSchema(name:tmp_values_col4, type:string, comment:), ]
+POSTHOOK: Lineage: alltinyints.ctinyint1 EXPRESSION [(values__tmp__table__4)values__tmp__table__4.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
+POSTHOOK: Lineage: alltinyints.ctinyint2 EXPRESSION [(values__tmp__table__4)values__tmp__table__4.FieldSchema(name:tmp_values_col2, type:string, comment:), ]
+POSTHOOK: Lineage: alltinyints.ctinyint3 EXPRESSION [(values__tmp__table__4)values__tmp__table__4.FieldSchema(name:tmp_values_col3, type:string, comment:), ]
+PREHOOK: query: select width_bucket(ctinyint1, ctinyint2, ctinyint3, cint) from alltinyints
+PREHOOK: type: QUERY
+PREHOOK: Input: default@alltinyints
+#### A masked pattern was here ####
+POSTHOOK: query: select width_bucket(ctinyint1, ctinyint2, ctinyint3, cint) from alltinyints
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@alltinyints
+#### A masked pattern was here ####
+1
+PREHOOK: query: create table allsmallints (
+    csmallint1 SMALLINT,
+    csmallint2 SMALLINT,
+    csmallint3 SMALLINT,
+    cint INT)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@allsmallints
+POSTHOOK: query: create table allsmallints (
+    csmallint1 SMALLINT,
+    csmallint2 SMALLINT,
+    csmallint3 SMALLINT,
+    cint INT)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@allsmallints
+PREHOOK: query: insert into table allsmallints values (5, 1, 10, 2)
+PREHOOK: type: QUERY
+PREHOOK: Output: default@allsmallints
+POSTHOOK: query: insert into table allsmallints values (5, 1, 10, 2)
+POSTHOOK: type: QUERY
+POSTHOOK: Output: default@allsmallints
+POSTHOOK: Lineage: allsmallints.cint EXPRESSION [(values__tmp__table__5)values__tmp__table__5.FieldSchema(name:tmp_values_col4, type:string, comment:), ]
+POSTHOOK: Lineage: allsmallints.csmallint1 EXPRESSION [(values__tmp__table__5)values__tmp__table__5.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
+POSTHOOK: Lineage: allsmallints.csmallint2 EXPRESSION [(values__tmp__table__5)values__tmp__table__5.FieldSchema(name:tmp_values_col2, type:string, comment:), ]
+POSTHOOK: Lineage: allsmallints.csmallint3 EXPRESSION [(values__tmp__table__5)values__tmp__table__5.FieldSchema(name:tmp_values_col3, type:string, comment:), ]
+PREHOOK: query: select width_bucket(csmallint1, csmallint2, csmallint3, cint) from allsmallints
+PREHOOK: type: QUERY
+PREHOOK: Input: default@allsmallints
+#### A masked pattern was here ####
+POSTHOOK: query: select width_bucket(csmallint1, csmallint2, csmallint3, cint) from allsmallints
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@allsmallints
+#### A masked pattern was here ####
+1
+PREHOOK: query: create table allints (
+    cint1 INT,
+    cint2 INT,
+    cint3 INT,
+    cint4 INT)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@allints
+POSTHOOK: query: create table allints (
+    cint1 INT,
+    cint2 INT,
+    cint3 INT,
+    cint4 INT)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@allints
+PREHOOK: query: insert into table allints values (5, 1, 10, 2)
+PREHOOK: type: QUERY
+PREHOOK: Output: default@allints
+POSTHOOK: query: insert into table allints values (5, 1, 10, 2)
+POSTHOOK: type: QUERY
+POSTHOOK: Output: default@allints
+POSTHOOK: Lineage: allints.cint1 EXPRESSION [(values__tmp__table__6)values__tmp__table__6.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
+POSTHOOK: Lineage: allints.cint2 EXPRESSION [(values__tmp__table__6)values__tmp__table__6.FieldSchema(name:tmp_values_col2, type:string, comment:), ]
+POSTHOOK: Lineage: allints.cint3 EXPRESSION [(values__tmp__table__6)values__tmp__table__6.FieldSchema(name:tmp_values_col3, type:string, comment:), ]
+POSTHOOK: Lineage: allints.cint4 EXPRESSION [(values__tmp__table__6)values__tmp__table__6.FieldSchema(name:tmp_values_col4, type:string, comment:), ]
+PREHOOK: query: select width_bucket(cint1, cint2, cint3, cint4) from allints
+PREHOOK: type: QUERY
+PREHOOK: Input: default@allints
+#### A masked pattern was here ####
+POSTHOOK: query: select width_bucket(cint1, cint2, cint3, cint4) from allints
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@allints
+#### A masked pattern was here ####
+1
+PREHOOK: query: create table allbigints (
+    cbigint1 BIGINT,
+    cbigint2 BIGINT,
+    cbigint3 BIGINT,
+    cint INT)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@allbigints
+POSTHOOK: query: create table allbigints (
+    cbigint1 BIGINT,
+    cbigint2 BIGINT,
+    cbigint3 BIGINT,
+    cint INT)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@allbigints
+PREHOOK: query: insert into table allbigints values (5, 1, 10, 2)
+PREHOOK: type: QUERY
+PREHOOK: Output: default@allbigints
+POSTHOOK: query: insert into table allbigints values (5, 1, 10, 2)
+POSTHOOK: type: QUERY
+POSTHOOK: Output: default@allbigints
+POSTHOOK: Lineage: allbigints.cbigint1 EXPRESSION [(values__tmp__table__7)values__tmp__table__7.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
+POSTHOOK: Lineage: allbigints.cbigint2 EXPRESSION [(values__tmp__table__7)values__tmp__table__7.FieldSchema(name:tmp_values_col2, type:string, comment:), ]
+POSTHOOK: Lineage: allbigints.cbigint3 EXPRESSION [(values__tmp__table__7)values__tmp__table__7.FieldSchema(name:tmp_values_col3, type:string, comment:), ]
+POSTHOOK: Lineage: allbigints.cint EXPRESSION [(values__tmp__table__7)values__tmp__table__7.FieldSchema(name:tmp_values_col4, type:string, comment:), ]
+PREHOOK: query: select width_bucket(cbigint1, cbigint2, cbigint3, cint) from allbigints
+PREHOOK: type: QUERY
+PREHOOK: Input: default@allbigints
+#### A masked pattern was here ####
+POSTHOOK: query: select width_bucket(cbigint1, cbigint2, cbigint3, cint) from allbigints
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@allbigints
+#### A masked pattern was here ####
+1
+PREHOOK: query: create table allfloats (
+    cfloat1 FLOAT,
+    cfloat2 FLOAT,
+    cfloat3 FLOAT,
+    cint INT)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@allfloats
+POSTHOOK: query: create table allfloats (
+    cfloat1 FLOAT,
+    cfloat2 FLOAT,
+    cfloat3 FLOAT,
+    cint INT)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@allfloats
+PREHOOK: query: insert into table allfloats values (5.0, 1.0, 10.0, 2)
+PREHOOK: type: QUERY
+PREHOOK: Output: default@allfloats
+POSTHOOK: query: insert into table allfloats values (5.0, 1.0, 10.0, 2)
+POSTHOOK: type: QUERY
+POSTHOOK: Output: default@allfloats
+POSTHOOK: Lineage: allfloats.cfloat1 EXPRESSION [(values__tmp__table__8)values__tmp__table__8.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
+POSTHOOK: Lineage: allfloats.cfloat2 EXPRESSION [(values__tmp__table__8)values__tmp__table__8.FieldSchema(name:tmp_values_col2, type:string, comment:), ]
+POSTHOOK: Lineage: allfloats.cfloat3 EXPRESSION [(values__tmp__table__8)values__tmp__table__8.FieldSchema(name:tmp_values_col3, type:string, comment:), ]
+POSTHOOK: Lineage: allfloats.cint EXPRESSION [(values__tmp__table__8)values__tmp__table__8.FieldSchema(name:tmp_values_col4, type:string, comment:), ]
+PREHOOK: query: select width_bucket(cfloat1, cfloat2, cfloat3, cint) from allfloats
+PREHOOK: type: QUERY
+PREHOOK: Input: default@allfloats
+#### A masked pattern was here ####
+POSTHOOK: query: select width_bucket(cfloat1, cfloat2, cfloat3, cint) from allfloats
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@allfloats
+#### A masked pattern was here ####
+1
+PREHOOK: query: create table alldoubles (
+    cdouble1 DOUBLE,
+    cdouble2 DOUBLE,
+    cdouble3 DOUBLE,
+    cint INT)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@alldoubles
+POSTHOOK: query: create table alldoubles (
+    cdouble1 DOUBLE,
+    cdouble2 DOUBLE,
+    cdouble3 DOUBLE,
+    cint INT)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@alldoubles
+PREHOOK: query: insert into table alldoubles values (5.0, 1.0, 10.0, 2)
+PREHOOK: type: QUERY
+PREHOOK: Output: default@alldoubles
+POSTHOOK: query: insert into table alldoubles values (5.0, 1.0, 10.0, 2)
+POSTHOOK: type: QUERY
+POSTHOOK: Output: default@alldoubles
+POSTHOOK: Lineage: alldoubles.cdouble1 EXPRESSION [(values__tmp__table__9)values__tmp__table__9.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
+POSTHOOK: Lineage: alldoubles.cdouble2 EXPRESSION [(values__tmp__table__9)values__tmp__table__9.FieldSchema(name:tmp_values_col2, type:string, comment:), ]
+POSTHOOK: Lineage: alldoubles.cdouble3 EXPRESSION [(values__tmp__table__9)values__tmp__table__9.FieldSchema(name:tmp_values_col3, type:string, comment:), ]
+POSTHOOK: Lineage: alldoubles.cint EXPRESSION [(values__tmp__table__9)values__tmp__table__9.FieldSchema(name:tmp_values_col4, type:string, comment:), ]
+PREHOOK: query: select width_bucket(cdouble1, cdouble2, cdouble3, cint) from alldoubles
+PREHOOK: type: QUERY
+PREHOOK: Input: default@alldoubles
+#### A masked pattern was here ####
+POSTHOOK: query: select width_bucket(cdouble1, cdouble2, cdouble3, cint) from alldoubles
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@alldoubles
+#### A masked pattern was here ####
+1
+PREHOOK: query: create table testgroupingsets (c1 int, c2 int)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@testgroupingsets
+POSTHOOK: query: create table testgroupingsets (c1 int, c2 int)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@testgroupingsets
+PREHOOK: query: insert into table testgroupingsets values (1, 1), (2, 2)
+PREHOOK: type: QUERY
+PREHOOK: Output: default@testgroupingsets
+POSTHOOK: query: insert into table testgroupingsets values (1, 1), (2, 2)
+POSTHOOK: type: QUERY
+POSTHOOK: Output: default@testgroupingsets
+POSTHOOK: Lineage: testgroupingsets.c1 EXPRESSION [(values__tmp__table__10)values__tmp__table__10.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
+POSTHOOK: Lineage: testgroupingsets.c2 EXPRESSION [(values__tmp__table__10)values__tmp__table__10.FieldSchema(name:tmp_values_col2, type:string, comment:), ]
+PREHOOK: query: select c1, c2, width_bucket(5, c1, 10, case when grouping(c2) = 0 then 10 else 5 end) from testgroupingsets group by cube(c1, c2)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@testgroupingsets
+#### A masked pattern was here ####
+POSTHOOK: query: select c1, c2, width_bucket(5, c1, 10, case when grouping(c2) = 0 then 10 else 5 end) from testgroupingsets group by cube(c1, c2)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@testgroupingsets
+#### A masked pattern was here ####
+NULL	NULL	NULL
+NULL	1	NULL
+NULL	2	NULL
+1	NULL	3
+1	1	5
+2	NULL	2
+2	2	4
+PREHOOK: query: drop table alldecimaltypes
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: drop table alldecimaltypes
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: drop table alltypes
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@alltypes
+PREHOOK: Output: default@alltypes
+POSTHOOK: query: drop table alltypes
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@alltypes
+POSTHOOK: Output: default@alltypes
+PREHOOK: query: drop table alltinyints
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@alltinyints
+PREHOOK: Output: default@alltinyints
+POSTHOOK: query: drop table alltinyints
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@alltinyints
+POSTHOOK: Output: default@alltinyints
+PREHOOK: query: drop table allsmallints
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@allsmallints
+PREHOOK: Output: default@allsmallints
+POSTHOOK: query: drop table allsmallints
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@allsmallints
+POSTHOOK: Output: default@allsmallints
+PREHOOK: query: drop table allints
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@allints
+PREHOOK: Output: default@allints
+POSTHOOK: query: drop table allints
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@allints
+POSTHOOK: Output: default@allints
+PREHOOK: query: drop table allbigints
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@allbigints
+PREHOOK: Output: default@allbigints
+POSTHOOK: query: drop table allbigints
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@allbigints
+POSTHOOK: Output: default@allbigints
+PREHOOK: query: drop table allfloats
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@allfloats
+PREHOOK: Output: default@allfloats
+POSTHOOK: query: drop table allfloats
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@allfloats
+POSTHOOK: Output: default@allfloats
+PREHOOK: query: drop table alldoubles
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@alldoubles
+PREHOOK: Output: default@alldoubles
+POSTHOOK: query: drop table alldoubles
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@alldoubles
+POSTHOOK: Output: default@alldoubles
+PREHOOK: query: drop table testgroupingsets
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@testgroupingsets
+PREHOOK: Output: default@testgroupingsets
+POSTHOOK: query: drop table testgroupingsets
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@testgroupingsets
+POSTHOOK: Output: default@testgroupingsets


[42/50] [abbrv] hive git commit: HIVE-14671 : merge master into hive-14535 (Wei Zheng)

Posted by we...@apache.org.
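(Editorial note, not part of the patch; every class, method, and map entry in this sketch is illustrative.) The CachedStore diff below prewarms an in-memory copy of the metastore and then keeps it fresh with a single daemon thread scheduled at a fixed interval; metadata writes go through the raw store first, update the cache, and interrupt the refresher fire-and-forget so an in-flight refresh bails out early. A minimal sketch of that pattern:

// Editorial sketch only -- not part of the patch. Illustrates the scheduled
// background refresh plus fire-and-forget interrupt used by CachedStore below.
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicReference;

public class RefreshingCacheSketch {
  private final Map<String, String> cache = new ConcurrentHashMap<>();
  private final AtomicReference<Thread> refresher = new AtomicReference<>();
  private final ScheduledExecutorService scheduler =
      Executors.newScheduledThreadPool(1, r -> {
        Thread t = Executors.defaultThreadFactory().newThread(r);
        t.setDaemon(true);                       // do not block JVM shutdown
        return t;
      });

  // Hypothetical stand-in for reading the backing (raw) store.
  private Map<String, String> loadFromBackingStore() {
    Map<String, String> snapshot = new HashMap<>();
    snapshot.put("db1", "metadata-for-db1");     // hypothetical entry
    return snapshot;
  }

  public void start(long periodMillis) {
    scheduler.scheduleAtFixedRate(() -> {
      refresher.set(Thread.currentThread());
      for (Map.Entry<String, String> e : loadFromBackingStore().entrySet()) {
        if (Thread.interrupted()) {
          return;                                // a writer preempted this refresh
        }
        cache.put(e.getKey(), e.getValue());
      }
    }, 0, periodMillis, TimeUnit.MILLISECONDS);
  }

  // Writers update the cache, then fire-and-forget an interrupt at the refresher.
  public void write(String key, String value) {
    cache.put(key, value);
    Thread t = refresher.get();
    if (t != null) {
      t.interrupt();
    }
  }

  public static void main(String[] args) throws InterruptedException {
    RefreshingCacheSketch store = new RefreshingCacheSketch();
    store.start(1000);                           // refresh every second
    store.write("db2", "metadata-for-db2");
    Thread.sleep(50);
    System.out.println(store.cache);             // db1 from refresh, db2 from write
  }
}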
http://git-wip-us.apache.org/repos/asf/hive/blob/1ceaf357/metastore/src/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java
----------------------------------------------------------------------
diff --cc metastore/src/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java
index 0000000,39b1676..c91dd4c
mode 000000,100644..100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java
@@@ -1,0 -1,1579 +1,1622 @@@
+ /**
+  * Licensed to the Apache Software Foundation (ASF) under one
+  * or more contributor license agreements.  See the NOTICE file
+  * distributed with this work for additional information
+  * regarding copyright ownership.  The ASF licenses this file
+  * to you under the Apache License, Version 2.0 (the
+  * "License"); you may not use this file except in compliance
+  * with the License.  You may obtain a copy of the License at
+  *
+  *     http://www.apache.org/licenses/LICENSE-2.0
+  *
+  * Unless required by applicable law or agreed to in writing, software
+  * distributed under the License is distributed on an "AS IS" BASIS,
+  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  * See the License for the specific language governing permissions and
+  * limitations under the License.
+  */
+ package org.apache.hadoop.hive.metastore.cache;
+ 
+ import java.nio.ByteBuffer;
 -import java.util.ArrayList;
 -import java.util.LinkedList;
 -import java.util.List;
 -import java.util.Map;
++import java.util.*;
+ import java.util.concurrent.Executors;
+ import java.util.concurrent.ScheduledExecutorService;
+ import java.util.concurrent.ThreadFactory;
+ import java.util.concurrent.TimeUnit;
+ import java.util.concurrent.atomic.AtomicReference;
+ 
+ import org.apache.hadoop.conf.Configurable;
+ import org.apache.hadoop.conf.Configuration;
+ import org.apache.hadoop.hive.common.FileUtils;
+ import org.apache.hadoop.hive.conf.HiveConf;
+ import org.apache.hadoop.hive.metastore.FileMetadataHandler;
+ import org.apache.hadoop.hive.metastore.MetaStoreUtils;
+ import org.apache.hadoop.hive.metastore.ObjectStore;
+ import org.apache.hadoop.hive.metastore.PartFilterExprUtil;
+ import org.apache.hadoop.hive.metastore.PartitionExpressionProxy;
+ import org.apache.hadoop.hive.metastore.RawStore;
+ import org.apache.hadoop.hive.metastore.TableType;
+ import org.apache.hadoop.hive.metastore.Warehouse;
+ import org.apache.hadoop.hive.metastore.api.AggrStats;
+ import org.apache.hadoop.hive.metastore.api.BinaryColumnStatsData;
+ import org.apache.hadoop.hive.metastore.api.BooleanColumnStatsData;
+ import org.apache.hadoop.hive.metastore.api.ColumnStatistics;
+ import org.apache.hadoop.hive.metastore.api.ColumnStatisticsData;
+ import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;
+ import org.apache.hadoop.hive.metastore.api.CurrentNotificationEventId;
+ import org.apache.hadoop.hive.metastore.api.Database;
+ import org.apache.hadoop.hive.metastore.api.Date;
+ import org.apache.hadoop.hive.metastore.api.DateColumnStatsData;
+ import org.apache.hadoop.hive.metastore.api.Decimal;
+ import org.apache.hadoop.hive.metastore.api.DecimalColumnStatsData;
+ import org.apache.hadoop.hive.metastore.api.DoubleColumnStatsData;
+ import org.apache.hadoop.hive.metastore.api.FieldSchema;
+ import org.apache.hadoop.hive.metastore.api.FileMetadataExprType;
+ import org.apache.hadoop.hive.metastore.api.Function;
+ import org.apache.hadoop.hive.metastore.api.HiveObjectPrivilege;
+ import org.apache.hadoop.hive.metastore.api.Index;
+ import org.apache.hadoop.hive.metastore.api.InvalidInputException;
+ import org.apache.hadoop.hive.metastore.api.InvalidObjectException;
+ import org.apache.hadoop.hive.metastore.api.InvalidPartitionException;
+ import org.apache.hadoop.hive.metastore.api.LongColumnStatsData;
+ import org.apache.hadoop.hive.metastore.api.MetaException;
+ import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
+ import org.apache.hadoop.hive.metastore.api.NotificationEvent;
+ import org.apache.hadoop.hive.metastore.api.NotificationEventRequest;
+ import org.apache.hadoop.hive.metastore.api.NotificationEventResponse;
+ import org.apache.hadoop.hive.metastore.api.Partition;
+ import org.apache.hadoop.hive.metastore.api.PartitionEventType;
+ import org.apache.hadoop.hive.metastore.api.PrincipalPrivilegeSet;
+ import org.apache.hadoop.hive.metastore.api.PrincipalType;
+ import org.apache.hadoop.hive.metastore.api.PrivilegeBag;
+ import org.apache.hadoop.hive.metastore.api.Role;
+ import org.apache.hadoop.hive.metastore.api.RolePrincipalGrant;
+ import org.apache.hadoop.hive.metastore.api.SQLForeignKey;
+ import org.apache.hadoop.hive.metastore.api.SQLPrimaryKey;
+ import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
+ import org.apache.hadoop.hive.metastore.api.StringColumnStatsData;
+ import org.apache.hadoop.hive.metastore.api.Table;
+ import org.apache.hadoop.hive.metastore.api.TableMeta;
+ import org.apache.hadoop.hive.metastore.api.Type;
+ import org.apache.hadoop.hive.metastore.api.UnknownDBException;
+ import org.apache.hadoop.hive.metastore.api.UnknownPartitionException;
+ import org.apache.hadoop.hive.metastore.api.UnknownTableException;
++import org.apache.hadoop.hive.metastore.model.MTableWrite;
+ import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy;
+ import org.apache.hadoop.hive.serde2.typeinfo.PrimitiveTypeInfo;
+ import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;
+ import org.apache.hive.common.util.HiveStringUtils;
+ import org.apache.thrift.TException;
+ import org.slf4j.Logger;
+ import org.slf4j.LoggerFactory;
+ 
+ import com.google.common.annotations.VisibleForTesting;
+ 
+ // TODO filter->expr
+ // TODO functionCache
+ // TODO constraintCache
+ // TODO need sd nested copy?
+ // TODO String intern
+ // TODO restructure HBaseStore
+ // TODO monitor event queue
+ // TODO initial load slow?
+ // TODO size estimation
+ // TODO factor in extrapolation logic (using partitions found) during aggregate stats calculation
+ // TODO factor in NDV estimation (density based estimation) logic when merging NDVs from 2 colStats object
+ // TODO refactor to use same common code with StatObjectConverter (for merging 2 col stats objects)
+ 
+ public class CachedStore implements RawStore, Configurable {
+   private static ScheduledExecutorService cacheUpdateMaster = null;
+   private static AtomicReference<Thread> runningMasterThread = new AtomicReference<Thread>(null);
+   RawStore rawStore;
+   Configuration conf;
+   private PartitionExpressionProxy expressionProxy = null;
+   static boolean firstTime = true;
+ 
+   static final private Logger LOG = LoggerFactory.getLogger(CachedStore.class.getName());
+ 
+   static class TableWrapper {
+     Table t;
+     String location;
+     Map<String, String> parameters;
+     byte[] sdHash;
+     TableWrapper(Table t, byte[] sdHash, String location, Map<String, String> parameters) {
+       this.t = t;
+       this.sdHash = sdHash;
+       this.location = location;
+       this.parameters = parameters;
+     }
+     public Table getTable() {
+       return t;
+     }
+     public byte[] getSdHash() {
+       return sdHash;
+     }
+     public String getLocation() {
+       return location;
+     }
+     public Map<String, String> getParameters() {
+       return parameters;
+     }
+   }
+ 
+   static class PartitionWrapper {
+     Partition p;
+     String location;
+     Map<String, String> parameters;
+     byte[] sdHash;
+     PartitionWrapper(Partition p, byte[] sdHash, String location, Map<String, String> parameters) {
+       this.p = p;
+       this.sdHash = sdHash;
+       this.location = location;
+       this.parameters = parameters;
+     }
+     public Partition getPartition() {
+       return p;
+     }
+     public byte[] getSdHash() {
+       return sdHash;
+     }
+     public String getLocation() {
+       return location;
+     }
+     public Map<String, String> getParameters() {
+       return parameters;
+     }
+   }
+ 
+   static class StorageDescriptorWrapper {
+     StorageDescriptor sd;
+     int refCount = 0;
+     StorageDescriptorWrapper(StorageDescriptor sd, int refCount) {
+       this.sd = sd;
+       this.refCount = refCount;
+     }
+     public StorageDescriptor getSd() {
+       return sd;
+     }
+     public int getRefCount() {
+       return refCount;
+     }
+   }
+ 
+   public CachedStore() {
+   }
+ 
+   @Override
+   public void setConf(Configuration conf) {
+     String rawStoreClassName = HiveConf.getVar(conf, HiveConf.ConfVars.METASTORE_CACHED_RAW_STORE_IMPL,
+         ObjectStore.class.getName());
+     try {
+       rawStore = ((Class<? extends RawStore>) MetaStoreUtils.getClass(
+           rawStoreClassName)).newInstance();
+     } catch (Exception e) {
+       throw new RuntimeException("Cannot instantiate " + rawStoreClassName, e);
+     }
+     rawStore.setConf(conf);
+     Configuration oldConf = this.conf;
+     this.conf = conf;
+     if (expressionProxy != null && conf != oldConf) {
+       LOG.warn("Unexpected setConf when we were already configured");
+     }
+     if (expressionProxy == null || conf != oldConf) {
+       expressionProxy = PartFilterExprUtil.createExpressionProxy(conf);
+     }
+     if (firstTime) {
+       try {
+         LOG.info("Prewarming CachedStore");
+         prewarm();
+         LOG.info("CachedStore initialized");
+       } catch (Exception e) {
+         throw new RuntimeException(e);
+       }
+       firstTime = false;
+     }
+   }
+ 
+   private void prewarm() throws Exception {
+     List<String> dbNames = rawStore.getAllDatabases();
+     for (String dbName : dbNames) {
+       Database db = rawStore.getDatabase(dbName);
+       SharedCache.addDatabaseToCache(HiveStringUtils.normalizeIdentifier(dbName), db);
+       List<String> tblNames = rawStore.getAllTables(dbName);
+       for (String tblName : tblNames) {
+         Table table = rawStore.getTable(dbName, tblName);
+         SharedCache.addTableToCache(HiveStringUtils.normalizeIdentifier(dbName),
+             HiveStringUtils.normalizeIdentifier(tblName), table);
+         List<Partition> partitions = rawStore.getPartitions(dbName, tblName, Integer.MAX_VALUE);
+         for (Partition partition : partitions) {
+           SharedCache.addPartitionToCache(HiveStringUtils.normalizeIdentifier(dbName),
+               HiveStringUtils.normalizeIdentifier(tblName), partition);
+         }
+         Map<String, ColumnStatisticsObj> aggrStatsPerPartition = rawStore
+             .getAggrColStatsForTablePartitions(dbName, tblName);
+         SharedCache.addPartitionColStatsToCache(aggrStatsPerPartition);
+       }
+     }
+     // Start the cache update master-worker threads
+     startCacheUpdateService();
+   }
+ 
+   private synchronized void startCacheUpdateService() {
+     if (cacheUpdateMaster == null) {
+       cacheUpdateMaster = Executors.newScheduledThreadPool(1, new ThreadFactory() {
+         public Thread newThread(Runnable r) {
+           Thread t = Executors.defaultThreadFactory().newThread(r);
+           t.setDaemon(true);
+           return t;
+         }
+       });
+       cacheUpdateMaster.scheduleAtFixedRate(new CacheUpdateMasterWork(this), 0, HiveConf
+           .getTimeVar(conf, HiveConf.ConfVars.METASTORE_CACHED_RAW_STORE_CACHE_UPDATE_FREQUENCY,
+               TimeUnit.MILLISECONDS), TimeUnit.MILLISECONDS);
+     }
+   }
+ 
+   static class CacheUpdateMasterWork implements Runnable {
+ 
+     private CachedStore cachedStore;
+ 
+     public CacheUpdateMasterWork(CachedStore cachedStore) {
+       this.cachedStore = cachedStore;
+     }
+ 
+     @Override
+     public void run() {
+       runningMasterThread.set(Thread.currentThread());
+       RawStore rawStore = cachedStore.getRawStore();
+       try {
+         List<String> dbNames = rawStore.getAllDatabases();
+         // Update the database in cache
+         if (!updateDatabases(rawStore, dbNames)) {
+           return;
+         }
+         // Update the tables and their partitions in cache
+         if (!updateTables(rawStore, dbNames)) {
+           return;
+         }
+       } catch (MetaException e) {
+         LOG.error("Updating CachedStore: error getting database names", e);
+       }
+     }
+ 
+     private boolean updateDatabases(RawStore rawStore, List<String> dbNames) {
+       if (dbNames != null) {
+         List<Database> databases = new ArrayList<Database>();
+         for (String dbName : dbNames) {
+           // If a preemption of this thread was requested, simply return before proceeding
+           if (Thread.interrupted()) {
+             return false;
+           }
+           Database db;
+           try {
+             db = rawStore.getDatabase(dbName);
+             databases.add(db);
+           } catch (NoSuchObjectException e) {
+             LOG.info("Updating CachedStore: database - " + dbName + " does not exist.", e);
+           }
+         }
+         // Update the cached database objects
+         SharedCache.refreshDatabases(databases);
+       }
+       return true;
+     }
+ 
+     private boolean updateTables(RawStore rawStore, List<String> dbNames) {
+       if (dbNames != null) {
+         List<Table> tables = new ArrayList<Table>();
+         for (String dbName : dbNames) {
+           try {
+             List<String> tblNames = rawStore.getAllTables(dbName);
+             for (String tblName : tblNames) {
+               // If a preemption of this thread was requested, simply return before proceeding
+               if (Thread.interrupted()) {
+                 return false;
+               }
+               Table table = rawStore.getTable(dbName, tblName);
+               tables.add(table);
+             }
+             // Update the cached database objects
+             SharedCache.refreshTables(dbName, tables);
+             for (String tblName : tblNames) {
+               // If a preemption of this thread was requested, simply return before proceeding
+               if (Thread.interrupted()) {
+                 return false;
+               }
+               List<Partition> partitions =
+                   rawStore.getPartitions(dbName, tblName, Integer.MAX_VALUE);
+               SharedCache.refreshPartitions(dbName, tblName, partitions);
+             }
+           } catch (MetaException | NoSuchObjectException e) {
+             LOG.error("Updating CachedStore: unable to read table", e);
+             return false;
+           }
+         }
+       }
+       return true;
+     }
+   }
+ 
+   // Interrupt the cache update background thread
+   // Fire and forget (the master will respond appropriately when it gets a chance)
+   // All writes to the cache go through synchronized methods, so fire & forget is fine.
+   private void interruptCacheUpdateMaster() {
+     if (runningMasterThread.get() != null) {
+       runningMasterThread.get().interrupt();
+     }
+   }
+ 
+   @Override
+   public Configuration getConf() {
+     return rawStore.getConf();
+   }
+ 
+   @Override
+   public void shutdown() {
+     rawStore.shutdown();
+   }
+ 
+   @Override
+   public boolean openTransaction() {
+     return rawStore.openTransaction();
+   }
+ 
+   @Override
+   public boolean commitTransaction() {
+     return rawStore.commitTransaction();
+   }
+ 
+   @Override
++  public Boolean commitTransactionExpectDeadlock() {
++    return null;
++  }
++
++  @Override
+   public void rollbackTransaction() {
+     rawStore.rollbackTransaction();
+   }
+ 
+   @Override
+   public void createDatabase(Database db)
+       throws InvalidObjectException, MetaException {
+     rawStore.createDatabase(db);
+     interruptCacheUpdateMaster();
+     SharedCache.addDatabaseToCache(HiveStringUtils.normalizeIdentifier(db.getName()), db.deepCopy());
+   }
+ 
+   @Override
+   public Database getDatabase(String dbName) throws NoSuchObjectException {
+     Database db = SharedCache.getDatabaseFromCache(HiveStringUtils.normalizeIdentifier(dbName));
+     if (db == null) {
+       throw new NoSuchObjectException();
+     }
+     return SharedCache.getDatabaseFromCache(HiveStringUtils.normalizeIdentifier(dbName));
+   }
+ 
+   @Override
+   public boolean dropDatabase(String dbname) throws NoSuchObjectException, MetaException {
+     boolean succ = rawStore.dropDatabase(dbname);
+     if (succ) {
+       interruptCacheUpdateMaster();
+       SharedCache.removeDatabaseFromCache(HiveStringUtils.normalizeIdentifier(dbname));
+     }
+     return succ;
+   }
+ 
+   @Override
+   public boolean alterDatabase(String dbName, Database db)
+       throws NoSuchObjectException, MetaException {
+     boolean succ = rawStore.alterDatabase(dbName, db);
+     if (succ) {
+       interruptCacheUpdateMaster();
+       SharedCache.alterDatabaseInCache(HiveStringUtils.normalizeIdentifier(dbName), db);
+     }
+     return succ;
+   }
+ 
+   @Override
+   public List<String> getDatabases(String pattern) throws MetaException {
+     List<String> results = new ArrayList<String>();
+     for (String dbName : SharedCache.listCachedDatabases()) {
+       dbName = HiveStringUtils.normalizeIdentifier(dbName);
+       if (CacheUtils.matches(dbName, pattern)) {
+         results.add(dbName);
+       }
+     }
+     return results;
+   }
+ 
+   @Override
+   public List<String> getAllDatabases() throws MetaException {
+     return SharedCache.listCachedDatabases();
+   }
+ 
+   @Override
+   public boolean createType(Type type) {
+     return rawStore.createType(type);
+   }
+ 
+   @Override
+   public Type getType(String typeName) {
+     return rawStore.getType(typeName);
+   }
+ 
+   @Override
+   public boolean dropType(String typeName) {
+     return rawStore.dropType(typeName);
+   }
+ 
+   private void validateTableType(Table tbl) {
+     // If the table has property EXTERNAL set, update table type
+     // accordingly
+     String tableType = tbl.getTableType();
+     boolean isExternal = "TRUE".equals(tbl.getParameters().get("EXTERNAL"));
+     if (TableType.MANAGED_TABLE.toString().equals(tableType)) {
+       if (isExternal) {
+         tableType = TableType.EXTERNAL_TABLE.toString();
+       }
+     }
+     if (TableType.EXTERNAL_TABLE.toString().equals(tableType)) {
+       if (!isExternal) {
+         tableType = TableType.MANAGED_TABLE.toString();
+       }
+     }
+     tbl.setTableType(tableType);
+   }
+ 
+   @Override
+   public void createTable(Table tbl)
+       throws InvalidObjectException, MetaException {
+     rawStore.createTable(tbl);
+     interruptCacheUpdateMaster();
+     validateTableType(tbl);
+     SharedCache.addTableToCache(HiveStringUtils.normalizeIdentifier(tbl.getDbName()),
+         HiveStringUtils.normalizeIdentifier(tbl.getTableName()), tbl);
+   }
+ 
+   @Override
+   public boolean dropTable(String dbName, String tableName)
+       throws MetaException, NoSuchObjectException, InvalidObjectException,
+       InvalidInputException {
+     boolean succ = rawStore.dropTable(dbName, tableName);
+     if (succ) {
+       interruptCacheUpdateMaster();
+       SharedCache.removeTableFromCache(HiveStringUtils.normalizeIdentifier(dbName),
+           HiveStringUtils.normalizeIdentifier(tableName));
+     }
+     return succ;
+   }
+ 
+   @Override
+   public Table getTable(String dbName, String tableName) throws MetaException {
+     Table tbl = SharedCache.getTableFromCache(HiveStringUtils.normalizeIdentifier(dbName),
+         HiveStringUtils.normalizeIdentifier(tableName));
+     if (tbl != null) {
+       tbl.unsetPrivileges();
+       tbl.setRewriteEnabled(tbl.isRewriteEnabled());
+     }
+     return tbl;
+   }
+ 
+   @Override
+   public boolean addPartition(Partition part)
+       throws InvalidObjectException, MetaException {
+     boolean succ = rawStore.addPartition(part);
+     if (succ) {
+       interruptCacheUpdateMaster();
+       SharedCache.addPartitionToCache(HiveStringUtils.normalizeIdentifier(part.getDbName()),
+           HiveStringUtils.normalizeIdentifier(part.getTableName()), part);
+     }
+     return succ;
+   }
+ 
+   @Override
+   public boolean addPartitions(String dbName, String tblName,
+       List<Partition> parts) throws InvalidObjectException, MetaException {
+     boolean succ = rawStore.addPartitions(dbName, tblName, parts);
+     if (succ) {
+       interruptCacheUpdateMaster();
+       for (Partition part : parts) {
+         SharedCache.addPartitionToCache(HiveStringUtils.normalizeIdentifier(dbName),
+             HiveStringUtils.normalizeIdentifier(tblName), part);
+       }
+     }
+     return succ;
+   }
+ 
+   @Override
+   public boolean addPartitions(String dbName, String tblName,
+       PartitionSpecProxy partitionSpec, boolean ifNotExists)
+       throws InvalidObjectException, MetaException {
+     boolean succ = rawStore.addPartitions(dbName, tblName, partitionSpec, ifNotExists);
+     if (succ) {
+       interruptCacheUpdateMaster();
+       PartitionSpecProxy.PartitionIterator iterator = partitionSpec.getPartitionIterator();
+       while (iterator.hasNext()) {
+         Partition part = iterator.next();
+         SharedCache.addPartitionToCache(HiveStringUtils.normalizeIdentifier(dbName),
+             HiveStringUtils.normalizeIdentifier(tblName), part);
+       }
+     }
+     return succ;
+   }
+ 
+   @Override
+   public Partition getPartition(String dbName, String tableName,
+       List<String> part_vals) throws MetaException, NoSuchObjectException {
+     Partition part = SharedCache.getPartitionFromCache(HiveStringUtils.normalizeIdentifier(dbName),
+         HiveStringUtils.normalizeIdentifier(tableName), part_vals);
+     if (part != null) {
+       part.unsetPrivileges();
+     }
+     return part;
+   }
+ 
+   @Override
+   public boolean doesPartitionExist(String dbName, String tableName,
+       List<String> part_vals) throws MetaException, NoSuchObjectException {
+     return SharedCache.existPartitionFromCache(HiveStringUtils.normalizeIdentifier(dbName),
+         HiveStringUtils.normalizeIdentifier(tableName), part_vals);
+   }
+ 
+   @Override
+   public boolean dropPartition(String dbName, String tableName,
+       List<String> part_vals) throws MetaException, NoSuchObjectException,
+       InvalidObjectException, InvalidInputException {
+     boolean succ = rawStore.dropPartition(dbName, tableName, part_vals);
+     if (succ) {
+       interruptCacheUpdateMaster();
+       SharedCache.removePartitionFromCache(HiveStringUtils.normalizeIdentifier(dbName),
+           HiveStringUtils.normalizeIdentifier(tableName), part_vals);
+     }
+     return succ;
+   }
+ 
+   @Override
+   public List<Partition> getPartitions(String dbName, String tableName, int max)
+       throws MetaException, NoSuchObjectException {
+     List<Partition> parts = SharedCache.listCachedPartitions(HiveStringUtils.normalizeIdentifier(dbName),
+         HiveStringUtils.normalizeIdentifier(tableName), max);
+     if (parts != null) {
+       for (Partition part : parts) {
+         part.unsetPrivileges();
+       }
+     }
+     return parts;
+   }
+ 
+   @Override
+   public void alterTable(String dbName, String tblName, Table newTable)
+       throws InvalidObjectException, MetaException {
+     rawStore.alterTable(dbName, tblName, newTable);
+     interruptCacheUpdateMaster();
+     validateTableType(newTable);
+     SharedCache.alterTableInCache(HiveStringUtils.normalizeIdentifier(dbName),
+         HiveStringUtils.normalizeIdentifier(tblName), newTable);
+   }
+ 
+   @Override
+   public List<String> getTables(String dbName, String pattern)
+       throws MetaException {
+     List<String> tableNames = new ArrayList<String>();
+     for (Table table : SharedCache.listCachedTables(HiveStringUtils.normalizeIdentifier(dbName))) {
+       if (CacheUtils.matches(table.getTableName(), pattern)) {
+         tableNames.add(table.getTableName());
+       }
+     }
+     return tableNames;
+   }
+ 
+   @Override
+   public List<String> getTables(String dbName, String pattern,
+       TableType tableType) throws MetaException {
+     List<String> tableNames = new ArrayList<String>();
+     for (Table table : SharedCache.listCachedTables(HiveStringUtils.normalizeIdentifier(dbName))) {
+       if (CacheUtils.matches(table.getTableName(), pattern) &&
+           table.getTableType().equals(tableType.toString())) {
+         tableNames.add(table.getTableName());
+       }
+     }
+     return tableNames;
+   }
+ 
+   @Override
+   public List<TableMeta> getTableMeta(String dbNames, String tableNames,
+       List<String> tableTypes) throws MetaException {
+     return SharedCache.getTableMeta(HiveStringUtils.normalizeIdentifier(dbNames),
+         HiveStringUtils.normalizeIdentifier(tableNames), tableTypes);
+   }
+ 
+   @Override
+   public List<Table> getTableObjectsByName(String dbName,
+       List<String> tblNames) throws MetaException, UnknownDBException {
+     List<Table> tables = new ArrayList<Table>();
+     for (String tblName : tblNames) {
+       tables.add(SharedCache.getTableFromCache(HiveStringUtils.normalizeIdentifier(dbName),
+           HiveStringUtils.normalizeIdentifier(tblName)));
+     }
+     return tables;
+   }
+ 
+   @Override
+   public List<String> getAllTables(String dbName) throws MetaException {
+     List<String> tblNames = new ArrayList<String>();
+     for (Table tbl : SharedCache.listCachedTables(HiveStringUtils.normalizeIdentifier(dbName))) {
+       tblNames.add(HiveStringUtils.normalizeIdentifier(tbl.getTableName()));
+     }
+     return tblNames;
+   }
+ 
+   @Override
+   public List<String> listTableNamesByFilter(String dbName, String filter,
+       short max_tables) throws MetaException, UnknownDBException {
+     List<String> tableNames = new ArrayList<String>();
+     int count = 0;
+     for (Table table : SharedCache.listCachedTables(HiveStringUtils.normalizeIdentifier(dbName))) {
+       if (CacheUtils.matches(table.getTableName(), filter)
+           && (max_tables == -1 || count < max_tables)) {
+         tableNames.add(table.getTableName());
+         count++;
+       }
+     }
+     return tableNames;
+   }
+ 
+   @Override
+   public List<String> listPartitionNames(String dbName, String tblName,
+       short max_parts) throws MetaException {
+     List<String> partitionNames = new ArrayList<String>();
+     Table t = SharedCache.getTableFromCache(HiveStringUtils.normalizeIdentifier(dbName),
+         HiveStringUtils.normalizeIdentifier(tblName));
+     int count = 0;
+     for (Partition part : SharedCache.listCachedPartitions(HiveStringUtils.normalizeIdentifier(dbName),
+         HiveStringUtils.normalizeIdentifier(tblName), max_parts)) {
+       if (max_parts == -1 || count < max_parts) {
+         partitionNames.add(Warehouse.makePartName(t.getPartitionKeys(), part.getValues()));
+         count++;
+       }
+     }
+     return partitionNames;
+   }
+ 
+   @Override
+   public List<String> listPartitionNamesByFilter(String db_name,
+       String tbl_name, String filter, short max_parts) throws MetaException {
+     // TODO Translate filter -> expr
+     return null;
+   }
+ 
+   @Override
+   public void alterPartition(String dbName, String tblName,
+       List<String> partVals, Partition newPart)
+       throws InvalidObjectException, MetaException {
+     rawStore.alterPartition(dbName, tblName, partVals, newPart);
+     interruptCacheUpdateMaster();
+     SharedCache.alterPartitionInCache(HiveStringUtils.normalizeIdentifier(dbName),
+         HiveStringUtils.normalizeIdentifier(tblName), partVals, newPart);
+   }
+ 
+   @Override
+   public void alterPartitions(String dbName, String tblName,
+       List<List<String>> partValsList, List<Partition> newParts)
+       throws InvalidObjectException, MetaException {
+     rawStore.alterPartitions(dbName, tblName, partValsList, newParts);
+     interruptCacheUpdateMaster();
+     for (int i = 0; i < partValsList.size(); i++) {
+       List<String> partVals = partValsList.get(i);
+       Partition newPart = newParts.get(i);
+       SharedCache.alterPartitionInCache(HiveStringUtils.normalizeIdentifier(dbName),
+           HiveStringUtils.normalizeIdentifier(tblName), partVals, newPart);
+     }
+   }
+ 
+   @Override
+   public boolean addIndex(Index index)
+       throws InvalidObjectException, MetaException {
+     return rawStore.addIndex(index);
+   }
+ 
+   @Override
+   public Index getIndex(String dbName, String origTableName, String indexName)
+       throws MetaException {
+     return rawStore.getIndex(dbName, origTableName, indexName);
+   }
+ 
+   @Override
+   public boolean dropIndex(String dbName, String origTableName,
+       String indexName) throws MetaException {
+     return rawStore.dropIndex(dbName, origTableName, indexName);
+   }
+ 
+   @Override
+   public List<Index> getIndexes(String dbName, String origTableName, int max)
+       throws MetaException {
+     return rawStore.getIndexes(dbName, origTableName, max);
+   }
+ 
+   @Override
+   public List<String> listIndexNames(String dbName, String origTableName,
+       short max) throws MetaException {
+     return rawStore.listIndexNames(dbName, origTableName, max);
+   }
+ 
+   @Override
+   public void alterIndex(String dbname, String baseTblName, String name,
+       Index newIndex) throws InvalidObjectException, MetaException {
+     rawStore.alterIndex(dbname, baseTblName, name, newIndex);
+   }
+ 
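+   // Lists the names of all cached partitions of the table and prunes them in place through the
+   // partition expression proxy; returns true if the expression could not be fully evaluated and
+   // the result may therefore still contain partitions that do not match it.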
+   private boolean getPartitionNamesPrunedByExprNoTxn(Table table, byte[] expr,
+       String defaultPartName, short maxParts, List<String> result) throws MetaException, NoSuchObjectException {
+     List<Partition> parts = SharedCache.listCachedPartitions(
+         HiveStringUtils.normalizeIdentifier(table.getDbName()),
+         HiveStringUtils.normalizeIdentifier(table.getTableName()), maxParts);
+     for (Partition part : parts) {
+       result.add(Warehouse.makePartName(table.getPartitionKeys(), part.getValues()));
+     }
+     List<String> columnNames = new ArrayList<String>();
+     List<PrimitiveTypeInfo> typeInfos = new ArrayList<PrimitiveTypeInfo>();
+     for (FieldSchema fs : table.getPartitionKeys()) {
+       columnNames.add(fs.getName());
+       typeInfos.add(TypeInfoFactory.getPrimitiveTypeInfo(fs.getType()));
+     }
+     if (defaultPartName == null || defaultPartName.isEmpty()) {
+       defaultPartName = HiveConf.getVar(getConf(), HiveConf.ConfVars.DEFAULTPARTITIONNAME);
+     }
+     return expressionProxy.filterPartitionsByExpr(
+         columnNames, typeInfos, expr, defaultPartName, result);
+   }
+ 
+   @Override
+   public List<Partition> getPartitionsByFilter(String dbName, String tblName,
+       String filter, short maxParts)
+       throws MetaException, NoSuchObjectException {
+     // TODO Auto-generated method stub
+     return null;
+   }
+ 
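+   // Prunes the cached partition names with the serialized expression, then resolves each
+   // surviving name back to its cached Partition object.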
+   @Override
+   public boolean getPartitionsByExpr(String dbName, String tblName, byte[] expr,
+       String defaultPartitionName, short maxParts, List<Partition> result)
+       throws TException {
+     List<String> partNames = new LinkedList<String>();
+     Table table = SharedCache.getTableFromCache(HiveStringUtils.normalizeIdentifier(dbName), HiveStringUtils.normalizeIdentifier(tblName));
+     boolean hasUnknownPartitions = getPartitionNamesPrunedByExprNoTxn(
+         table, expr, defaultPartitionName, maxParts, partNames);
+     for (String partName : partNames) {
+       Partition part = SharedCache.getPartitionFromCache(HiveStringUtils.normalizeIdentifier(dbName),
+           HiveStringUtils.normalizeIdentifier(tblName), partNameToVals(partName));
+       result.add(part);
+     }
+     return hasUnknownPartitions;
+   }
+ 
+   @Override
+   public int getNumPartitionsByFilter(String dbName, String tblName,
+       String filter) throws MetaException, NoSuchObjectException {
+     Table table = SharedCache.getTableFromCache(HiveStringUtils.normalizeIdentifier(dbName),
+         HiveStringUtils.normalizeIdentifier(tblName));
+     // TODO filter -> expr
+     return 0;
+   }
+ 
+   @Override
+   public int getNumPartitionsByExpr(String dbName, String tblName, byte[] expr)
+       throws MetaException, NoSuchObjectException {
+     String defaultPartName = HiveConf.getVar(getConf(), HiveConf.ConfVars.DEFAULTPARTITIONNAME);
+     List<String> partNames = new LinkedList<String>();
+     Table table = SharedCache.getTableFromCache(HiveStringUtils.normalizeIdentifier(dbName),
+         HiveStringUtils.normalizeIdentifier(tblName));
+     getPartitionNamesPrunedByExprNoTxn(table, expr, defaultPartName, Short.MAX_VALUE, partNames);
+     return partNames.size();
+   }
+ 
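+   // Converts a partition name such as "ds=2017-05-08/hr=12" into its list of unescaped
+   // partition values, e.g. ["2017-05-08", "12"].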
+   public static List<String> partNameToVals(String name) {
+     if (name == null) return null;
+     List<String> vals = new ArrayList<String>();
+     String[] kvp = name.split("/");
+     for (String kv : kvp) {
+       vals.add(FileUtils.unescapePathName(kv.substring(kv.indexOf('=') + 1)));
+     }
+     return vals;
+   }
+ 
+   @Override
+   public List<Partition> getPartitionsByNames(String dbName, String tblName,
+       List<String> partNames) throws MetaException, NoSuchObjectException {
+     List<Partition> partitions = new ArrayList<Partition>();
+     for (String partName : partNames) {
+       Partition part = SharedCache.getPartitionFromCache(HiveStringUtils.normalizeIdentifier(dbName),
+           HiveStringUtils.normalizeIdentifier(tblName), partNameToVals(partName));
+       if (part != null) {
+         partitions.add(part);
+       }
+     }
+     return partitions;
+   }
+ 
+   @Override
+   public Table markPartitionForEvent(String dbName, String tblName,
+       Map<String, String> partVals, PartitionEventType evtType)
+       throws MetaException, UnknownTableException, InvalidPartitionException,
+       UnknownPartitionException {
+     return rawStore.markPartitionForEvent(dbName, tblName, partVals, evtType);
+   }
+ 
+   @Override
+   public boolean isPartitionMarkedForEvent(String dbName, String tblName,
+       Map<String, String> partName, PartitionEventType evtType)
+       throws MetaException, UnknownTableException, InvalidPartitionException,
+       UnknownPartitionException {
+     return rawStore.isPartitionMarkedForEvent(dbName, tblName, partName, evtType);
+   }
+ 
+   @Override
+   public boolean addRole(String roleName, String ownerName)
+       throws InvalidObjectException, MetaException, NoSuchObjectException {
+     return rawStore.addRole(roleName, ownerName);
+   }
+ 
+   @Override
+   public boolean removeRole(String roleName)
+       throws MetaException, NoSuchObjectException {
+     return rawStore.removeRole(roleName);
+   }
+ 
+   @Override
+   public boolean grantRole(Role role, String userName,
+       PrincipalType principalType, String grantor, PrincipalType grantorType,
+       boolean grantOption)
+       throws MetaException, NoSuchObjectException, InvalidObjectException {
+     return rawStore.grantRole(role, userName, principalType, grantor, grantorType, grantOption);
+   }
+ 
+   @Override
+   public boolean revokeRole(Role role, String userName,
+       PrincipalType principalType, boolean grantOption)
+       throws MetaException, NoSuchObjectException {
+     return rawStore.revokeRole(role, userName, principalType, grantOption);
+   }
+ 
+   @Override
+   public PrincipalPrivilegeSet getUserPrivilegeSet(String userName,
+       List<String> groupNames) throws InvalidObjectException, MetaException {
+     return rawStore.getUserPrivilegeSet(userName, groupNames);
+   }
+ 
+   @Override
+   public PrincipalPrivilegeSet getDBPrivilegeSet(String dbName, String userName,
+       List<String> groupNames) throws InvalidObjectException, MetaException {
+     return rawStore.getDBPrivilegeSet(dbName, userName, groupNames);
+   }
+ 
+   @Override
+   public PrincipalPrivilegeSet getTablePrivilegeSet(String dbName,
+       String tableName, String userName, List<String> groupNames)
+       throws InvalidObjectException, MetaException {
+     return rawStore.getTablePrivilegeSet(dbName, tableName, userName, groupNames);
+   }
+ 
+   @Override
+   public PrincipalPrivilegeSet getPartitionPrivilegeSet(String dbName,
+       String tableName, String partition, String userName,
+       List<String> groupNames) throws InvalidObjectException, MetaException {
+     return rawStore.getPartitionPrivilegeSet(dbName, tableName, partition, userName, groupNames);
+   }
+ 
+   @Override
+   public PrincipalPrivilegeSet getColumnPrivilegeSet(String dbName,
+       String tableName, String partitionName, String columnName,
+       String userName, List<String> groupNames)
+       throws InvalidObjectException, MetaException {
+     return rawStore.getColumnPrivilegeSet(dbName, tableName, partitionName, columnName, userName, groupNames);
+   }
+ 
+   @Override
+   public List<HiveObjectPrivilege> listPrincipalGlobalGrants(
+       String principalName, PrincipalType principalType) {
+     return rawStore.listPrincipalGlobalGrants(principalName, principalType);
+   }
+ 
+   @Override
+   public List<HiveObjectPrivilege> listPrincipalDBGrants(String principalName,
+       PrincipalType principalType, String dbName) {
+     return rawStore.listPrincipalDBGrants(principalName, principalType, dbName);
+   }
+ 
+   @Override
+   public List<HiveObjectPrivilege> listAllTableGrants(String principalName,
+       PrincipalType principalType, String dbName, String tableName) {
+     return rawStore.listAllTableGrants(principalName, principalType, dbName, tableName);
+   }
+ 
+   @Override
+   public List<HiveObjectPrivilege> listPrincipalPartitionGrants(
+       String principalName, PrincipalType principalType, String dbName,
+       String tableName, List<String> partValues, String partName) {
+     return rawStore.listPrincipalPartitionGrants(principalName, principalType, dbName, tableName, partValues, partName);
+   }
+ 
+   @Override
+   public List<HiveObjectPrivilege> listPrincipalTableColumnGrants(
+       String principalName, PrincipalType principalType, String dbName,
+       String tableName, String columnName) {
+     return rawStore.listPrincipalTableColumnGrants(principalName, principalType, dbName, tableName, columnName);
+   }
+ 
+   @Override
+   public List<HiveObjectPrivilege> listPrincipalPartitionColumnGrants(
+       String principalName, PrincipalType principalType, String dbName,
+       String tableName, List<String> partValues, String partName,
+       String columnName) {
+     return rawStore.listPrincipalPartitionColumnGrants(principalName, principalType, dbName, tableName, partValues, partName, columnName);
+   }
+ 
+   @Override
+   public boolean grantPrivileges(PrivilegeBag privileges)
+       throws InvalidObjectException, MetaException, NoSuchObjectException {
+     return rawStore.grantPrivileges(privileges);
+   }
+ 
+   @Override
+   public boolean revokePrivileges(PrivilegeBag privileges, boolean grantOption)
+       throws InvalidObjectException, MetaException, NoSuchObjectException {
+     return rawStore.revokePrivileges(privileges, grantOption);
+   }
+ 
+   @Override
+   public Role getRole(String roleName) throws NoSuchObjectException {
+     return rawStore.getRole(roleName);
+   }
+ 
+   @Override
+   public List<String> listRoleNames() {
+     return rawStore.listRoleNames();
+   }
+ 
+   @Override
+   public List<Role> listRoles(String principalName,
+       PrincipalType principalType) {
+     return rawStore.listRoles(principalName, principalType);
+   }
+ 
+   @Override
+   public List<RolePrincipalGrant> listRolesWithGrants(String principalName,
+       PrincipalType principalType) {
+     return rawStore.listRolesWithGrants(principalName, principalType);
+   }
+ 
+   @Override
+   public List<RolePrincipalGrant> listRoleMembers(String roleName) {
+     return rawStore.listRoleMembers(roleName);
+   }
+ 
+   @Override
+   public Partition getPartitionWithAuth(String dbName, String tblName,
+       List<String> partVals, String userName, List<String> groupNames)
+       throws MetaException, NoSuchObjectException, InvalidObjectException {
+     Partition p = SharedCache.getPartitionFromCache(HiveStringUtils.normalizeIdentifier(dbName),
+         HiveStringUtils.normalizeIdentifier(tblName), partVals);
+     if (p != null) {
+       Table t = SharedCache.getTableFromCache(HiveStringUtils.normalizeIdentifier(dbName),
+           HiveStringUtils.normalizeIdentifier(tblName));
+       String partName = Warehouse.makePartName(t.getPartitionKeys(), partVals);
+       PrincipalPrivilegeSet privs = getPartitionPrivilegeSet(dbName, tblName, partName,
+           userName, groupNames);
+       p.setPrivileges(privs);
+     }
+     return p;
+   }
+ 
+   @Override
+   public List<Partition> getPartitionsWithAuth(String dbName, String tblName,
+       short maxParts, String userName, List<String> groupNames)
+       throws MetaException, NoSuchObjectException, InvalidObjectException {
+     Table t = SharedCache.getTableFromCache(HiveStringUtils.normalizeIdentifier(dbName),
+         HiveStringUtils.normalizeIdentifier(tblName));
+     List<Partition> partitions = new ArrayList<Partition>();
+     int count = 0;
+     for (Partition part : SharedCache.listCachedPartitions(HiveStringUtils.normalizeIdentifier(dbName),
+         HiveStringUtils.normalizeIdentifier(tblName), maxParts)) {
+       if (maxParts == -1 || count < maxParts) {
+         String partName = Warehouse.makePartName(t.getPartitionKeys(), part.getValues());
+         PrincipalPrivilegeSet privs = getPartitionPrivilegeSet(dbName, tblName, partName,
+             userName, groupNames);
+         part.setPrivileges(privs);
+         partitions.add(part);
+         count++;
+       }
+     }
+     return partitions;
+   }
+ 
+   @Override
+   public List<String> listPartitionNamesPs(String dbName, String tblName,
+       List<String> partVals, short maxParts)
+       throws MetaException, NoSuchObjectException {
+     List<String> partNames = new ArrayList<String>();
+     int count = 0;
+     Table t = SharedCache.getTableFromCache(HiveStringUtils.normalizeIdentifier(dbName),
+         HiveStringUtils.normalizeIdentifier(tblName));
+     for (Partition part : SharedCache.listCachedPartitions(HiveStringUtils.normalizeIdentifier(dbName),
+         HiveStringUtils.normalizeIdentifier(tblName), maxParts)) {
+       boolean psMatch = true;
+       for (int i = 0; i < partVals.size(); i++) {
+         String psVal = partVals.get(i);
+         String partVal = part.getValues().get(i);
+         if (psVal != null && !psVal.isEmpty() && !psVal.equals(partVal)) {
+           psMatch = false;
+           break;
+         }
+       }
+       if (!psMatch) {
+         continue;
+       }
+       if (maxParts == -1 || count < maxParts) {
+         partNames.add(Warehouse.makePartName(t.getPartitionKeys(), part.getValues()));
+         count++;
+       }
+     }
+     return partNames;
+   }
+ 
+   @Override
+   public List<Partition> listPartitionsPsWithAuth(String dbName,
+       String tblName, List<String> partVals, short maxParts, String userName,
+       List<String> groupNames)
+       throws MetaException, InvalidObjectException, NoSuchObjectException {
+     List<Partition> partitions = new ArrayList<Partition>();
+     Table t = SharedCache.getTableFromCache(HiveStringUtils.normalizeIdentifier(dbName),
+         HiveStringUtils.normalizeIdentifier(tblName));
+     int count = 0;
+     for (Partition part : SharedCache.listCachedPartitions(HiveStringUtils.normalizeIdentifier(dbName),
+         HiveStringUtils.normalizeIdentifier(tblName), maxParts)) {
+       boolean psMatch = true;
+       for (int i = 0; i < partVals.size(); i++) {
+         String psVal = partVals.get(i);
+         String partVal = part.getValues().get(i);
+         if (psVal != null && !psVal.isEmpty() && !psVal.equals(partVal)) {
+           psMatch = false;
+           break;
+         }
+       }
+       if (!psMatch) {
+         continue;
+       }
+       if (maxParts == -1 || count < maxParts) {
+         String partName = Warehouse.makePartName(t.getPartitionKeys(), part.getValues());
+         PrincipalPrivilegeSet privs = getPartitionPrivilegeSet(dbName, tblName, partName,
+             userName, groupNames);
+         part.setPrivileges(privs);
+         partitions.add(part);
+         count++;
+       }
+     }
+     return partitions;
+   }
+ 
+   @Override
+   public boolean updateTableColumnStatistics(ColumnStatistics colStats)
+       throws NoSuchObjectException, MetaException, InvalidObjectException,
+       InvalidInputException {
+     boolean succ = rawStore.updateTableColumnStatistics(colStats);
+     if (succ) {
+       SharedCache.updateTableColumnStatistics(HiveStringUtils.normalizeIdentifier(colStats.getStatsDesc().getDbName()),
+           HiveStringUtils.normalizeIdentifier(colStats.getStatsDesc().getTableName()), colStats.getStatsObj());
+     }
+     return succ;
+   }
+ 
+   @Override
+   public boolean updatePartitionColumnStatistics(ColumnStatistics colStats,
+       List<String> partVals) throws NoSuchObjectException, MetaException,
+       InvalidObjectException, InvalidInputException {
+     boolean succ = rawStore.updatePartitionColumnStatistics(colStats, partVals);
+     if (succ) {
+       SharedCache.updatePartitionColumnStatistics(HiveStringUtils.normalizeIdentifier(colStats.getStatsDesc().getDbName()),
+           HiveStringUtils.normalizeIdentifier(colStats.getStatsDesc().getTableName()), partVals, colStats.getStatsObj());
+     }
+     return succ;
+   }
+ 
+   @Override
+   public ColumnStatistics getTableColumnStatistics(String dbName,
+       String tableName, List<String> colName)
+       throws MetaException, NoSuchObjectException {
+     return rawStore.getTableColumnStatistics(dbName, tableName, colName);
+   }
+ 
+   @Override
+   public List<ColumnStatistics> getPartitionColumnStatistics(String dbName,
+       String tblName, List<String> partNames, List<String> colNames)
+       throws MetaException, NoSuchObjectException {
+     return rawStore.getPartitionColumnStatistics(dbName, tblName, partNames, colNames);
+   }
+ 
+   @Override
+   public boolean deletePartitionColumnStatistics(String dbName,
+       String tableName, String partName, List<String> partVals, String colName)
+       throws NoSuchObjectException, MetaException, InvalidObjectException,
+       InvalidInputException {
+     return rawStore.deletePartitionColumnStatistics(dbName, tableName, partName, partVals, colName);
+   }
+ 
+   @Override
+   public boolean deleteTableColumnStatistics(String dbName, String tableName,
+       String colName) throws NoSuchObjectException, MetaException,
+       InvalidObjectException, InvalidInputException {
+     return rawStore.deleteTableColumnStatistics(dbName, tableName, colName);
+   }
+ 
+   @Override
+   public long cleanupEvents() {
+     return rawStore.cleanupEvents();
+   }
+ 
+   @Override
+   public boolean addToken(String tokenIdentifier, String delegationToken) {
+     return rawStore.addToken(tokenIdentifier, delegationToken);
+   }
+ 
+   @Override
+   public boolean removeToken(String tokenIdentifier) {
+     return rawStore.removeToken(tokenIdentifier);
+   }
+ 
+   @Override
+   public String getToken(String tokenIdentifier) {
+     return rawStore.getToken(tokenIdentifier);
+   }
+ 
+   @Override
+   public List<String> getAllTokenIdentifiers() {
+     return rawStore.getAllTokenIdentifiers();
+   }
+ 
+   @Override
+   public int addMasterKey(String key) throws MetaException {
+     return rawStore.addMasterKey(key);
+   }
+ 
+   @Override
+   public void updateMasterKey(Integer seqNo, String key)
+       throws NoSuchObjectException, MetaException {
+     rawStore.updateMasterKey(seqNo, key);
+   }
+ 
+   @Override
+   public boolean removeMasterKey(Integer keySeq) {
+     return rawStore.removeMasterKey(keySeq);
+   }
+ 
+   @Override
+   public String[] getMasterKeys() {
+     return rawStore.getMasterKeys();
+   }
+ 
+   @Override
+   public void verifySchema() throws MetaException {
+     rawStore.verifySchema();
+   }
+ 
+   @Override
+   public String getMetaStoreSchemaVersion() throws MetaException {
+     return rawStore.getMetaStoreSchemaVersion();
+   }
+ 
+   @Override
+   public void setMetaStoreSchemaVersion(String version, String comment)
+       throws MetaException {
+     rawStore.setMetaStoreSchemaVersion(version, comment);
+   }
+ 
+   @Override
+   public void dropPartitions(String dbName, String tblName,
+       List<String> partNames) throws MetaException, NoSuchObjectException {
+     rawStore.dropPartitions(dbName, tblName, partNames);
+     interruptCacheUpdateMaster();
+     for (String partName : partNames) {
+       List<String> vals = partNameToVals(partName);
+       SharedCache.removePartitionFromCache(HiveStringUtils.normalizeIdentifier(dbName),
+           HiveStringUtils.normalizeIdentifier(tblName), vals);
+     }
+   }
+ 
+   @Override
+   public List<HiveObjectPrivilege> listPrincipalDBGrantsAll(
+       String principalName, PrincipalType principalType) {
+     return rawStore.listPrincipalDBGrantsAll(principalName, principalType);
+   }
+ 
+   @Override
+   public List<HiveObjectPrivilege> listPrincipalTableGrantsAll(
+       String principalName, PrincipalType principalType) {
+     return rawStore.listPrincipalTableGrantsAll(principalName, principalType);
+   }
+ 
+   @Override
+   public List<HiveObjectPrivilege> listPrincipalPartitionGrantsAll(
+       String principalName, PrincipalType principalType) {
+     return rawStore.listPrincipalPartitionGrantsAll(principalName, principalType);
+   }
+ 
+   @Override
+   public List<HiveObjectPrivilege> listPrincipalTableColumnGrantsAll(
+       String principalName, PrincipalType principalType) {
+     return rawStore.listPrincipalTableColumnGrantsAll(principalName, principalType);
+   }
+ 
+   @Override
+   public List<HiveObjectPrivilege> listPrincipalPartitionColumnGrantsAll(
+       String principalName, PrincipalType principalType) {
+     return rawStore.listPrincipalPartitionColumnGrantsAll(principalName, principalType);
+   }
+ 
+   @Override
+   public List<HiveObjectPrivilege> listGlobalGrantsAll() {
+     return rawStore.listGlobalGrantsAll();
+   }
+ 
+   @Override
+   public List<HiveObjectPrivilege> listDBGrantsAll(String dbName) {
+     return rawStore.listDBGrantsAll(dbName);
+   }
+ 
+   @Override
+   public List<HiveObjectPrivilege> listPartitionColumnGrantsAll(String dbName,
+       String tableName, String partitionName, String columnName) {
+     return rawStore.listPartitionColumnGrantsAll(dbName, tableName, partitionName, columnName);
+   }
+ 
+   @Override
+   public List<HiveObjectPrivilege> listTableGrantsAll(String dbName,
+       String tableName) {
+     return rawStore.listTableGrantsAll(dbName, tableName);
+   }
+ 
+   @Override
+   public List<HiveObjectPrivilege> listPartitionGrantsAll(String dbName,
+       String tableName, String partitionName) {
+     return rawStore.listPartitionGrantsAll(dbName, tableName, partitionName);
+   }
+ 
+   @Override
+   public List<HiveObjectPrivilege> listTableColumnGrantsAll(String dbName,
+       String tableName, String columnName) {
+     return rawStore.listTableColumnGrantsAll(dbName, tableName, columnName);
+   }
+ 
+   @Override
+   public void createFunction(Function func)
+       throws InvalidObjectException, MetaException {
+     // TODO functionCache
+     rawStore.createFunction(func);
+   }
+ 
+   @Override
+   public void alterFunction(String dbName, String funcName,
+       Function newFunction) throws InvalidObjectException, MetaException {
+     // TODO functionCache
+     rawStore.alterFunction(dbName, funcName, newFunction);
+   }
+ 
+   @Override
+   public void dropFunction(String dbName, String funcName) throws MetaException,
+       NoSuchObjectException, InvalidObjectException, InvalidInputException {
+     // TODO functionCache
+     rawStore.dropFunction(dbName, funcName);
+   }
+ 
+   @Override
+   public Function getFunction(String dbName, String funcName)
+       throws MetaException {
+     // TODO functionCache
+     return rawStore.getFunction(dbName, funcName);
+   }
+ 
+   @Override
+   public List<Function> getAllFunctions() throws MetaException {
+     // TODO functionCache
+     return rawStore.getAllFunctions();
+   }
+ 
+   @Override
+   public List<String> getFunctions(String dbName, String pattern)
+       throws MetaException {
+     // TODO functionCache
+     return rawStore.getFunctions(dbName, pattern);
+   }
+ 
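+   // Aggregates column statistics over the requested partitions by merging the per-partition
+   // stats cached for each column; see mergeColStatsForPartitions/mergeColStatsObj below.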
+   @Override
+   public AggrStats get_aggr_stats_for(String dbName, String tblName,
+       List<String> partNames, List<String> colNames)
+       throws MetaException, NoSuchObjectException {
+     List<ColumnStatisticsObj> colStats = new ArrayList<ColumnStatisticsObj>(colNames.size());
+     for (String colName : colNames) {
+       colStats.add(mergeColStatsForPartitions(HiveStringUtils.normalizeIdentifier(dbName),
+           HiveStringUtils.normalizeIdentifier(tblName), partNames, colName));
+     }
+     // TODO: revisit the partitions not found case for extrapolation
+     return new AggrStats(colStats, partNames.size());
+   }
+ 
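+   // Folds the cached per-partition statistics for one column into a single ColumnStatisticsObj,
+   // starting from the first partition's stats and merging each subsequent partition into it.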
+   private ColumnStatisticsObj mergeColStatsForPartitions(String dbName, String tblName,
+       List<String> partNames, String colName) throws MetaException {
+     ColumnStatisticsObj colStats = null;
+     for (String partName : partNames) {
+       String colStatsCacheKey = CacheUtils.buildKey(dbName, tblName, partNameToVals(partName), colName);
+       ColumnStatisticsObj colStatsForPart = SharedCache.getCachedPartitionColStats(
+           colStatsCacheKey);
+       if (colStats == null) {
+         colStats = colStatsForPart;
+       } else {
+         colStats = mergeColStatsObj(colStats, colStatsForPart);
+       }
+     }
+     return colStats;
+   }
+ 
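+   // Merges two stats objects for the same column: null/true/false counts are summed, high/low
+   // values widened to their max/min, and average/maximum lengths and distinct value counts
+   // taken as the maximum of the two inputs.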
+   private ColumnStatisticsObj mergeColStatsObj(ColumnStatisticsObj colStats1,
+       ColumnStatisticsObj colStats2) throws MetaException {
+     if (!colStats1.getColType().equalsIgnoreCase(colStats2.getColType())
+         || !colStats1.getColName().equalsIgnoreCase(colStats2.getColName())) {
+       throw new MetaException("Cannot merge stats of two partitions for different columns or column types.");
+     }
+     ColumnStatisticsData csd = new ColumnStatisticsData();
+     ColumnStatisticsObj cso = new ColumnStatisticsObj(colStats1.getColName(),
+         colStats1.getColType(), csd);
+     ColumnStatisticsData csData1 = colStats1.getStatsData();
+     ColumnStatisticsData csData2 = colStats2.getStatsData();
+     String colType = colStats1.getColType().toLowerCase();
+     if (colType.equals("boolean")) {
+       BooleanColumnStatsData boolStats = new BooleanColumnStatsData();
+       boolStats.setNumFalses(csData1.getBooleanStats().getNumFalses()
+           + csData2.getBooleanStats().getNumFalses());
+       boolStats.setNumTrues(csData1.getBooleanStats().getNumTrues()
+           + csData2.getBooleanStats().getNumTrues());
+       boolStats.setNumNulls(csData1.getBooleanStats().getNumNulls()
+           + csData2.getBooleanStats().getNumNulls());
+       csd.setBooleanStats(boolStats);
+     } else if (colType.equals("string") || colType.startsWith("varchar")
+         || colType.startsWith("char")) {
+       StringColumnStatsData stringStats = new StringColumnStatsData();
+       stringStats.setNumNulls(csData1.getStringStats().getNumNulls()
+           + csData2.getStringStats().getNumNulls());
+       stringStats.setAvgColLen(Math.max(csData1.getStringStats().getAvgColLen(), csData2
+           .getStringStats().getAvgColLen()));
+       stringStats.setMaxColLen(Math.max(csData1.getStringStats().getMaxColLen(), csData2
+           .getStringStats().getMaxColLen()));
+       stringStats.setNumDVs(Math.max(csData1.getStringStats().getNumDVs(), csData2.getStringStats()
+           .getNumDVs()));
+       csd.setStringStats(stringStats);
+     } else if (colType.equals("binary")) {
+       BinaryColumnStatsData binaryStats = new BinaryColumnStatsData();
+       binaryStats.setNumNulls(csData1.getBinaryStats().getNumNulls()
+           + csData2.getBinaryStats().getNumNulls());
+       binaryStats.setAvgColLen(Math.max(csData1.getBinaryStats().getAvgColLen(), csData2
+           .getBinaryStats().getAvgColLen()));
+       binaryStats.setMaxColLen(Math.max(csData1.getBinaryStats().getMaxColLen(), csData2
+           .getBinaryStats().getMaxColLen()));
+       csd.setBinaryStats(binaryStats);
+     } else if (colType.equals("bigint") || colType.equals("int") || colType.equals("smallint")
+         || colType.equals("tinyint") || colType.equals("timestamp")) {
+       LongColumnStatsData longStats = new LongColumnStatsData();
+       longStats.setNumNulls(csData1.getLongStats().getNumNulls()
+           + csData2.getLongStats().getNumNulls());
+       longStats.setHighValue(Math.max(csData1.getLongStats().getHighValue(), csData2.getLongStats()
+           .getHighValue()));
+       longStats.setLowValue(Math.min(csData1.getLongStats().getLowValue(), csData2.getLongStats()
+           .getLowValue()));
+       longStats.setNumDVs(Math.max(csData1.getLongStats().getNumDVs(), csData2.getLongStats()
+           .getNumDVs()));
+       csd.setLongStats(longStats);
+     } else if (colType.equals("date")) {
+       DateColumnStatsData dateStats = new DateColumnStatsData();
+       dateStats.setNumNulls(csData1.getDateStats().getNumNulls()
+           + csData2.getDateStats().getNumNulls());
+       dateStats.setHighValue(new Date(Math.max(csData1.getDateStats().getHighValue()
+           .getDaysSinceEpoch(), csData2.getDateStats().getHighValue().getDaysSinceEpoch())));
+       dateStats.setLowValue(new Date(Math.min(csData1.getDateStats().getLowValue()
+           .getDaysSinceEpoch(), csData2.getDateStats().getLowValue().getDaysSinceEpoch())));
+       dateStats.setNumDVs(Math.max(csData1.getDateStats().getNumDVs(), csData2.getDateStats()
+           .getNumDVs()));
+       csd.setDateStats(dateStats);
+     } else if (colType.equals("double") || colType.equals("float")) {
+       DoubleColumnStatsData doubleStats = new DoubleColumnStatsData();
+       doubleStats.setNumNulls(csData1.getDoubleStats().getNumNulls()
+           + csData2.getDoubleStats().getNumNulls());
+       doubleStats.setHighValue(Math.max(csData1.getDoubleStats().getHighValue(), csData2
+           .getDoubleStats().getHighValue()));
+       doubleStats.setLowValue(Math.min(csData1.getDoubleStats().getLowValue(), csData2
+           .getDoubleStats().getLowValue()));
+       doubleStats.setNumDVs(Math.max(csData1.getDoubleStats().getNumDVs(), csData2.getDoubleStats()
+           .getNumDVs()));
+       csd.setDoubleStats(doubleStats);
+     } else if (colType.startsWith("decimal")) {
+       DecimalColumnStatsData decimalStats = new DecimalColumnStatsData();
+       decimalStats.setNumNulls(csData1.getDecimalStats().getNumNulls()
+           + csData2.getDecimalStats().getNumNulls());
+       Decimal high = (csData1.getDecimalStats().getHighValue()
+           .compareTo(csData2.getDecimalStats().getHighValue()) > 0) ? csData1.getDecimalStats()
+           .getHighValue() : csData2.getDecimalStats().getHighValue();
+       decimalStats.setHighValue(high);
+       Decimal low = (csData1.getDecimalStats().getLowValue()
+           .compareTo(csData2.getDecimalStats().getLowValue()) < 0) ? csData1.getDecimalStats()
+           .getLowValue() : csData2.getDecimalStats().getLowValue();
+       decimalStats.setLowValue(low);
+       decimalStats.setNumDVs(Math.max(csData1.getDecimalStats().getNumDVs(), csData2
+           .getDecimalStats().getNumDVs()));
+       csd.setDecimalStats(decimalStats);
+     }
+     return cso;
+   }
+ 
+   @Override
+   public NotificationEventResponse getNextNotification(
+       NotificationEventRequest rqst) {
+     return rawStore.getNextNotification(rqst);
+   }
+ 
+   @Override
+   public void addNotificationEvent(NotificationEvent event) {
+     rawStore.addNotificationEvent(event);
+   }
+ 
+   @Override
+   public void cleanNotificationEvents(int olderThan) {
+     rawStore.cleanNotificationEvents(olderThan);
+   }
+ 
+   @Override
+   public CurrentNotificationEventId getCurrentNotificationEventId() {
+     return rawStore.getCurrentNotificationEventId();
+   }
+ 
+   @Override
+   public void flushCache() {
+     rawStore.flushCache();
+   }
+ 
+   @Override
+   public ByteBuffer[] getFileMetadata(List<Long> fileIds) throws MetaException {
+     return rawStore.getFileMetadata(fileIds);
+   }
+ 
+   @Override
+   public void putFileMetadata(List<Long> fileIds, List<ByteBuffer> metadata,
+       FileMetadataExprType type) throws MetaException {
+     rawStore.putFileMetadata(fileIds, metadata, type);
+   }
+ 
+   @Override
+   public boolean isFileMetadataSupported() {
+     return rawStore.isFileMetadataSupported();
+   }
+ 
+   @Override
+   public void getFileMetadataByExpr(List<Long> fileIds,
+       FileMetadataExprType type, byte[] expr, ByteBuffer[] metadatas,
+       ByteBuffer[] exprResults, boolean[] eliminated) throws MetaException {
+     rawStore.getFileMetadataByExpr(fileIds, type, expr, metadatas, exprResults, eliminated);
+   }
+ 
+   @Override
+   public FileMetadataHandler getFileMetadataHandler(FileMetadataExprType type) {
+     return rawStore.getFileMetadataHandler(type);
+   }
+ 
+   @Override
+   public int getTableCount() throws MetaException {
+     return SharedCache.getCachedTableCount();
+   }
+ 
+   @Override
+   public int getPartitionCount() throws MetaException {
+     return SharedCache.getCachedPartitionCount();
+   }
+ 
+   @Override
+   public int getDatabaseCount() throws MetaException {
+     return SharedCache.getCachedDatabaseCount();
+   }
+ 
+   @Override
+   public List<SQLPrimaryKey> getPrimaryKeys(String db_name, String tbl_name)
+       throws MetaException {
+     // TODO constraintCache
+     return rawStore.getPrimaryKeys(db_name, tbl_name);
+   }
+ 
+   @Override
+   public List<SQLForeignKey> getForeignKeys(String parent_db_name,
+       String parent_tbl_name, String foreign_db_name, String foreign_tbl_name)
+       throws MetaException {
+     // TODO constraintCache
+     return rawStore.getForeignKeys(parent_db_name, parent_tbl_name, foreign_db_name, foreign_tbl_name);
+   }
+ 
+   @Override
+   public void createTableWithConstraints(Table tbl,
+       List<SQLPrimaryKey> primaryKeys, List<SQLForeignKey> foreignKeys)
+       throws InvalidObjectException, MetaException {
+     // TODO constraintCache
+     rawStore.createTableWithConstraints(tbl, primaryKeys, foreignKeys);
+     SharedCache.addTableToCache(HiveStringUtils.normalizeIdentifier(tbl.getDbName()),
+         HiveStringUtils.normalizeIdentifier(tbl.getTableName()), tbl);
+   }
+ 
+   @Override
+   public void dropConstraint(String dbName, String tableName,
+       String constraintName) throws NoSuchObjectException {
+     // TODO constraintCache
+     rawStore.dropConstraint(dbName, tableName, constraintName);
+   }
+ 
+   @Override
+   public void addPrimaryKeys(List<SQLPrimaryKey> pks)
+       throws InvalidObjectException, MetaException {
+     // TODO constraintCache
+     rawStore.addPrimaryKeys(pks);
+   }
+ 
+   @Override
+   public void addForeignKeys(List<SQLForeignKey> fks)
+       throws InvalidObjectException, MetaException {
+     // TODO constraintCache
+     rawStore.addForeignKeys(fks);
+   }
+ 
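++  // TODO: MM table write bookkeeping is not cached yet; the methods below are placeholders
++  // that neither delegate to the RawStore nor touch the SharedCache.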
+   @Override
++  public void updateTableWrite(MTableWrite tw) {
++
++  }
++
++  @Override
++  public MTableWrite getTableWrite(String dbName, String tblName, long writeId) throws MetaException {
++    return null;
++  }
++
++  @Override
++  public void createTableWrite(Table tbl, long writeId, char state, long heartbeat) {
++
++  }
++
++  @Override
++  public List<Long> getTableWriteIds(String dbName, String tblName, long watermarkId, long nextWriteId, char state) throws MetaException {
++    return null;
++  }
++
++  @Override
++  public List<FullTableName> getAllMmTablesForCleanup() throws MetaException {
++    return null;
++  }
++
++  @Override
++  public List<MTableWrite> getTableWrites(String dbName, String tblName, long from, long to) throws MetaException {
++    return null;
++  }
++
++  @Override
++  public Collection<String> getAllPartitionLocations(String dbName, String tblName) {
++    return null;
++  }
++
++  @Override
++  public void deleteTableWrites(String dbName, String tblName, long from, long to) throws MetaException {
++
++  }
++
++  @Override
+   public Map<String, ColumnStatisticsObj> getAggrColStatsForTablePartitions(
+       String dbName, String tableName)
+       throws MetaException, NoSuchObjectException {
+     return rawStore.getAggrColStatsForTablePartitions(dbName, tableName);
+   }
+ 
+   public RawStore getRawStore() {
+     return rawStore;
+   }
+ 
+   @VisibleForTesting
+   public void setRawStore(RawStore rawStore) {
+     this.rawStore = rawStore;
+   }
 -}
++}

http://git-wip-us.apache.org/repos/asf/hive/blob/1ceaf357/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseStore.java
----------------------------------------------------------------------
diff --cc metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseStore.java
index 1340645,f6420f5..206196d
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseStore.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseStore.java
@@@ -2848,55 -2854,9 +2870,62 @@@ public class HBaseStore implements RawS
    }
  
    @Override
+   public Map<String, ColumnStatisticsObj> getAggrColStatsForTablePartitions(String dbName,
+       String tableName) throws MetaException, NoSuchObjectException {
+     // TODO: see if it makes sense to implement this here
+     return null;
+   }
++
++  @Override
 +  public void createTableWrite(Table tbl, long writeId, char state, long heartbeat) {
 +    // TODO: Auto-generated method stub
 +    throw new UnsupportedOperationException();
 +  }
 +
 +  @Override
 +  public void updateTableWrite(MTableWrite tw) {
 +    // TODO: Auto-generated method stub
 +    throw new UnsupportedOperationException();
 +  }
 +
 +  @Override
 +  public MTableWrite getTableWrite(String dbName, String tblName, long writeId) {
 +    // TODO: Auto-generated method stub
 +    throw new UnsupportedOperationException();
 +  }
-  
++
 +
 +  @Override
 +  public List<Long> getTableWriteIds(
 +      String dbName, String tblName, long watermarkId, long nextWriteId, char state) {
 +    // TODO: Auto-generated method stub
 +    throw new UnsupportedOperationException();
 +  }
 +
 +  @Override
 +  public List<FullTableName> getAllMmTablesForCleanup() throws MetaException {
 +    // TODO: Auto-generated method stub
 +    throw new UnsupportedOperationException();
 +  }
 +
 +  @Override
 +  public List<MTableWrite> getTableWrites(String dbName, String tblName,
 +      long from, long to) throws MetaException {
 +    // TODO: Auto-generated method stub
 +    throw new UnsupportedOperationException();
 +  }
 +
 +  @Override
 +  public Collection<String> getAllPartitionLocations(String dbName,
 +      String tblName) {
 +    // TODO: Auto-generated method stub
 +    throw new UnsupportedOperationException();
 +  }
 +
 +  @Override
 +  public void deleteTableWrites(String dbName, String tblName, long from,
 +      long to) throws MetaException {
 +    // TODO: Auto-generated method stub
 +    throw new UnsupportedOperationException();
 +  }
  }

http://git-wip-us.apache.org/repos/asf/hive/blob/1ceaf357/metastore/src/model/package.jdo
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/1ceaf357/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java
----------------------------------------------------------------------
diff --cc metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java
index 64da9b4,3e3fd20..7760bc7
--- a/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java
+++ b/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java
@@@ -873,50 -872,9 +874,57 @@@ public class DummyRawStoreControlledCom
    }
  
    @Override
+   public Map<String, ColumnStatisticsObj> getAggrColStatsForTablePartitions(String dbName,
+       String tableName) throws MetaException, NoSuchObjectException {
+     // TODO Auto-generated method stub
+     return null;
+   }
++
++  @Override
 +  public void createTableWrite(Table tbl, long writeId, char state, long heartbeat) {
 +  }
 +
 +  @Override
 +  public void updateTableWrite(MTableWrite tw) {
 +
 +  }
 +
 +  @Override
 +  public MTableWrite getTableWrite(String dbName, String tblName, long writeId) {
 +    return null;
 +  }
 +
 +  @Override
 +  @CanNotRetry
 +  public Boolean commitTransactionExpectDeadlock() {
 +    return null;
 +  }
 +
 +  @Override
 +  public List<Long> getTableWriteIds(
 +      String dbName, String tblName, long watermarkId, long nextWriteId, char state) {
 +    return null;
 +  }
 +
 +  @Override
 +  public List<FullTableName> getAllMmTablesForCleanup() throws MetaException {
 +    return null;
 +  }
 +
 +  @Override
 +  public List<MTableWrite> getTableWrites(String dbName, String tblName,
 +      long from, long to) throws MetaException {
 +    return null;
 +  }
 +
 +  @Override
 +  public Collection<String> getAllPartitionLocations(String dbName,
 +      String tblName) {
 +    return null;
 +  }
 +
 +  @Override
 +  public void deleteTableWrites(String dbName, String tblName, long from,
 +      long to) throws MetaException {
 +  }
  }

http://git-wip-us.apache.org/repos/asf/hive/blob/1ceaf357/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java
----------------------------------------------------------------------
diff --cc metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java
index d6460cd,91d8c2a..df05af1
--- a/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java
+++ b/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java
@@@ -892,45 -888,11 +893,52 @@@ public class DummyRawStoreForJdoConnect
    }
  
    @Override
+   public Map<String, ColumnStatisticsObj> getAggrColStatsForTablePartitions(String dbName,
+       String tableName) throws MetaException, NoSuchObjectException {
+     // TODO Auto-generated method stub
+     return null;
+   }
++
++  @Override
 +  public void createTableWrite(Table tbl, long writeId, char state, long heartbeat) {
 +  }
 +
 +  @Override
 +  public void updateTableWrite(MTableWrite tw) {
 +  }
 +
 +  @Override
 +  public MTableWrite getTableWrite(String dbName, String tblName, long writeId) {
 +    return null;
 +  }
 +
 +  @Override
 +  public List<Long> getTableWriteIds(
 +      String dbName, String tblName, long watermarkId, long nextWriteId, char state) {
 +    return null;
 +  }
 +
 +  @Override
 +  public List<FullTableName> getAllMmTablesForCleanup() throws MetaException {
 +    return null;
 +  }
 +
 +  @Override
 +  public List<MTableWrite> getTableWrites(String dbName, String tblName,
 +      long from, long to) throws MetaException {
 +    return null;
 +  }
 +
 +  @Override
 +  public Collection<String> getAllPartitionLocations(String dbName,
 +      String tblName) {
 +    return null;
 +  }
 +
 +  @Override
 +  public void deleteTableWrites(String dbName, String tblName, long from,
 +      long to) throws MetaException {
 +  }
  }
  
  

http://git-wip-us.apache.org/repos/asf/hive/blob/1ceaf357/metastore/src/test/org/apache/hadoop/hive/metastore/TestObjectStore.java
----------------------------------------------------------------------
diff --cc metastore/src/test/org/apache/hadoop/hive/metastore/TestObjectStore.java
index aaa03fb,69e8826..42bc8b2
--- a/metastore/src/test/org/apache/hadoop/hive/metastore/TestObjectStore.java
+++ b/metastore/src/test/org/apache/hadoop/hive/metastore/TestObjectStore.java
@@@ -32,7 -29,7 +32,8 @@@ import org.apache.hadoop.hive.common.me
  import org.apache.hadoop.hive.common.metrics.metrics2.MetricsReporting;
  import org.apache.hadoop.hive.common.metrics.MetricsTestUtils;
  import org.apache.hadoop.hive.conf.HiveConf;
 +import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
+ import org.apache.hadoop.hive.metastore.api.CurrentNotificationEventId;
  import org.apache.hadoop.hive.metastore.api.Database;
  import org.apache.hadoop.hive.metastore.api.FieldSchema;
  import org.apache.hadoop.hive.metastore.api.FileMetadataExprType;
@@@ -50,8 -50,7 +54,9 @@@ import org.apache.hadoop.hive.metastore
  import org.apache.hadoop.hive.metastore.api.SerDeInfo;
  import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
  import org.apache.hadoop.hive.metastore.api.Table;
 +import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants;
+ import org.apache.hadoop.hive.metastore.messaging.EventMessage;
 +import org.apache.hadoop.hive.metastore.model.MTableWrite;
  import org.apache.hadoop.hive.ql.io.sarg.SearchArgument;
  import org.apache.hadoop.hive.serde.serdeConstants;
  import org.apache.hadoop.hive.serde2.typeinfo.PrimitiveTypeInfo;
@@@ -65,7 -63,7 +71,8 @@@ import org.mockito.Mockito
  import org.slf4j.Logger;
  import org.slf4j.LoggerFactory;
  
 +import com.google.common.base.Supplier;
+ import javax.jdo.Query;
  
  public class TestObjectStore {
    private ObjectStore objectStore = null;
@@@ -187,158 -226,7 +244,158 @@@
  
      objectStore.dropDatabase(DB1);
    }
-   
+ 
 +
 +  /**
 +   * Tests the MM write cleaner thread: missing or timed-out writes are aborted, their files
 +   * removed, and the table's watermark write ID advanced.
 +   */
 +  @Test
 +  public void testMmCleaner() throws Exception {
 +    HiveConf conf = new HiveConf();
 +    conf.set(ConfVars.HIVE_METASTORE_MM_HEARTBEAT_TIMEOUT.varname, "3ms");
 +    conf.set(ConfVars.HIVE_METASTORE_MM_ABSOLUTE_TIMEOUT.varname, "20ms");
 +    conf.set(ConfVars.HIVE_METASTORE_MM_ABORTED_GRACE_PERIOD.varname, "5ms");
 +    conf.set("fs.mock.impl", MockFileSystem.class.getName());
 +
 +    MockFileSystem mfs = (MockFileSystem)(new Path("mock:///").getFileSystem(conf));
 +    mfs.clear();
 +    mfs.allowDelete = true;
 +    // Don't add the files just yet...
 +    MockFile[] files = new MockFile[9];
 +    for (int i = 0; i < files.length; ++i) {
 +      files[i] = new MockFile("mock:/foo/mm_" + i + "/1", 0, new byte[0]);
 +    }
 +
 +    LongSupplier time = new LongSupplier();
 +
 +    MmCleanerThread mct = new MmCleanerThread(0);
 +    mct.setHiveConf(conf);
 +    mct.overrideTime(time);
 +
 +    Database db1 = new Database(DB1, "description", "locationurl", null);
 +    objectStore.createDatabase(db1);
 +    StorageDescriptor sd = createFakeSd("mock:/foo");
 +    HashMap<String,String> params = new HashMap<String,String>();
 +    params.put("EXTERNAL", "false");
 +    params.put(hive_metastoreConstants.TABLE_IS_TRANSACTIONAL, "true");
 +    params.put(hive_metastoreConstants.TABLE_TRANSACTIONAL_PROPERTIES, "insert_only");
 +    Table tbl = new Table(TABLE1, DB1, "owner", 1, 2, 3, sd,
 +        null, params, null, null, "MANAGED_TABLE");
 +    objectStore.createTable(tbl);
 +
 +    // Add write #0 so the watermark wouldn't advance; skip write #1, add #2 at 0, skip #3
 +    createCompleteTableWrite(mfs, files, 0, time, tbl, HiveMetaStore.MM_WRITE_OPEN);
 +    mfs.addFile(files[1]);
 +    createCompleteTableWrite(mfs, files, 2, time, tbl, HiveMetaStore.MM_WRITE_OPEN);
 +    mfs.addFile(files[3]);
 +    tbl.setMmNextWriteId(4);
 +    objectStore.alterTable(DB1, TABLE1, tbl);
 +
 +    mct.runOneIteration(objectStore);
 +    List<Long> writes = getAbortedWrites();
 +    assertEquals(0, writes.size()); // Missing write is not aborted before timeout.
 +    time.value = 4; // Advance time.
 +    mct.runOneIteration(objectStore);
 +    writes = getAbortedWrites();
 +    assertEquals(1, writes.size()); // Missing write is aborted after timeout.
 +    assertEquals(1L, writes.get(0).longValue());
 +    checkDeletedSet(files, 1);
 +    // However, write #3 was not aborted as we cannot determine when it will time out.
 +    createCompleteTableWrite(mfs, files, 4, time, tbl, HiveMetaStore.MM_WRITE_OPEN);
 +    time.value = 8;
 +    // It will now be aborted, since we have a following write.
 +    mct.runOneIteration(objectStore);
 +    writes = getAbortedWrites();
 +    assertEquals(2, writes.size());
 +    assertTrue(writes.contains(Long.valueOf(3)));
 +    checkDeletedSet(files, 1, 3);
 +
 +    // Commit #0 and #2 and confirm that the watermark advances.
 +    // It will only advance over #1, since #3 was aborted at 8 and grace period has not passed.
 +    time.value = 10;
 +    MTableWrite tw = objectStore.getTableWrite(DB1, TABLE1, 0);
 +    tw.setState(String.valueOf(HiveMetaStore.MM_WRITE_COMMITTED));
 +    objectStore.updateTableWrite(tw);
 +    tw = objectStore.getTableWrite(DB1, TABLE1, 2);
 +    tw.setState(String.valueOf(HiveMetaStore.MM_WRITE_COMMITTED));
 +    objectStore.updateTableWrite(tw);
 +    mct.runOneIteration(objectStore);
 +    writes = getAbortedWrites();
 +    assertEquals(1, writes.size());
 +    assertEquals(3L, writes.get(0).longValue());
 +    tbl = objectStore.getTable(DB1, TABLE1);
 +    assertEquals(2L, tbl.getMmWatermarkWriteId());
 +
 +    // Now advance the time and see that watermark also advances over #3.
 +    time.value = 16;
 +    mct.runOneIteration(objectStore);
 +    writes = getAbortedWrites();
 +    assertEquals(0, writes.size());
 +    tbl = objectStore.getTable(DB1, TABLE1);
 +    assertEquals(3L, tbl.getMmWatermarkWriteId());
 +
 +    // Check that the open write gets aborted after some time; then the watermark advances.
 +    time.value = 25;
 +    mct.runOneIteration(objectStore);
 +    writes = getAbortedWrites();
 +    assertEquals(1, writes.size());
 +    assertEquals(4L, writes.get(0).longValue());
 +    time.value = 31;
 +    mct.runOneIteration(objectStore);
 +    tbl = objectStore.getTable(DB1, TABLE1);
 +    assertEquals(4L, tbl.getMmWatermarkWriteId());
 +    checkDeletedSet(files, 1, 3, 4); // The other two should still be deleted.
 +
 +    // Finally check that we cannot advance watermark if cleanup fails for some file.
 +    createCompleteTableWrite(mfs, files, 5, time, tbl, HiveMetaStore.MM_WRITE_ABORTED);
 +    createCompleteTableWrite(mfs, files, 6, time, tbl, HiveMetaStore.MM_WRITE_ABORTED);
 +    createCompleteTableWrite(mfs, files, 7, time, tbl, HiveMetaStore.MM_WRITE_COMMITTED);
 +    createCompleteTableWrite(mfs, files, 8, time, tbl, HiveMetaStore.MM_WRITE_ABORTED);
 +    time.value = 37; // Skip the grace period.
 +    files[6].cannotDelete = true;
 +    mct.runOneIteration(objectStore);
 +    checkDeletedSet(files, 1, 3, 4, 5, 8); // 5 and 8 are newly deleted; 6 could not be deleted.
 +    tbl = objectStore.getTable(DB1, TABLE1);
 +    assertEquals(5L, tbl.getMmWatermarkWriteId()); // Watermark only goes up to 5.
 +    files[6].cannotDelete = false;
 +    mct.runOneIteration(objectStore);
 +    checkDeletedSet(files, 1, 3, 4, 5, 6, 8);
 +    tbl = objectStore.getTable(DB1, TABLE1);
 +    assertEquals(8L, tbl.getMmWatermarkWriteId()); // Now it advances all the way.
 +
 +    objectStore.dropTable(DB1, TABLE1);
 +    objectStore.dropDatabase(DB1);
 +  }
 +
 +  private void createCompleteTableWrite(MockFileSystem mfs, MockFile[] files,
 +      int id, LongSupplier time, Table tbl, char state) throws MetaException, InvalidObjectException {
 +    objectStore.createTableWrite(tbl, id, state, time.value);
 +    mfs.addFile(files[id]);
 +    tbl.setMmNextWriteId(id + 1);
 +    objectStore.alterTable(DB1, TABLE1, tbl);
 +  }
 +
 +  private void checkDeletedSet(MockFile[] files, int... deleted) {
 +    for (int id : deleted) {
 +      assertTrue("File " + id + " not deleted", files[id].isDeleted);
 +    }
 +    int count = 0;
 +    for (MockFile file : files) {
 +      if (file.isDeleted) ++count;
 +    }
 +    assertEquals(deleted.length, count); // Make sure nothing else is deleted.
 +  }
 +
 +  private List<Long> getAbortedWrites() throws MetaException {
 +    return objectStore.getTableWriteIds(DB1, TABLE1, -1, 10, HiveMetaStore.MM_WRITE_ABORTED);
 +  }
 +
 +  private StorageDescriptor createFakeSd(String location) {
 +    return new StorageDescriptor(null, location, null, null, false, 0,
 +        new SerDeInfo("SerDeName", "serializationLib", null), null, null, null);
 +  }
 +
 +
    /**
     * Tests partition operations
     */
@@@ -525,4 -413,15 +582,15 @@@
      } catch (NoSuchObjectException e) {
      }
    }
- }
+ 
+   @Test
+   public void testQueryCloseOnError() throws Exception {
+     ObjectStore spy = Mockito.spy(objectStore);
+     spy.getAllDatabases();
+     spy.getAllFunctions();
+     spy.getAllTables(DB1);
+     spy.getPartitionCount();
+     Mockito.verify(spy, Mockito.times(3))
+         .rollbackAndCleanup(Mockito.anyBoolean(), Mockito.<Query>anyObject());
+   }
 -}
++}

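The test above exercises the MM (insert-only) write lifecycle: writes that never show up are aborted once a timeout passes, aborted writes have their files cleaned up, and the table's watermark write id advances only over a prefix of writes that are committed, or aborted with the grace period elapsed. The following is a minimal, self-contained sketch of that watermark rule; the types, field names, and constants are simplified stand-ins for illustration, not the Hive metastore API.

    // Sketch of the watermark rule exercised above (simplified stand-ins, not the Hive API).
    import java.util.HashMap;
    import java.util.Map;

    public class MmWatermarkSketch {
      enum State { OPEN, COMMITTED, ABORTED }

      static final class Write {
        final State state;
        final long terminatedAt; // time at which the write reached a terminal state
        Write(State state, long terminatedAt) { this.state = state; this.terminatedAt = terminatedAt; }
      }

      /** Highest write id the watermark may advance to, or -1 if it cannot move at all. */
      static long advanceWatermark(Map<Long, Write> writes, long nextWriteId, long now, long gracePeriod) {
        long watermark = -1;
        for (long id = 0; id < nextWriteId; ++id) {
          Write w = writes.get(id);
          if (w == null || w.state == State.OPEN) {
            break; // a missing or still-open write blocks the watermark until it is aborted
          }
          if (w.state == State.ABORTED && now - w.terminatedAt < gracePeriod) {
            break; // aborted, but the grace period for its files has not elapsed yet
          }
          watermark = id;
        }
        return watermark;
      }

      public static void main(String[] args) {
        Map<Long, Write> writes = new HashMap<>();
        writes.put(0L, new Write(State.COMMITTED, 0));
        writes.put(1L, new Write(State.ABORTED, 4));    // the skipped write, aborted after its timeout
        writes.put(2L, new Write(State.COMMITTED, 0));
        // write #3 is still open, so it blocks the watermark even though #0..#2 are terminated
        System.out.println(advanceWatermark(writes, 4, 10, 6)); // prints 2
      }
    }

In the test this is why the watermark first stops at 2 while write #3 sits in its grace period, and only reaches 3 once enough time has passed.
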
http://git-wip-us.apache.org/repos/asf/hive/blob/1ceaf357/ql/src/java/org/apache/hadoop/hive/ql/Context.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/1ceaf357/ql/src/java/org/apache/hadoop/hive/ql/Driver.java
----------------------------------------------------------------------
diff --cc ql/src/java/org/apache/hadoop/hive/ql/Driver.java
index 0b615cd,29cce9a..7b74bd5
--- a/ql/src/java/org/apache/hadoop/hive/ql/Driver.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/Driver.java
@@@ -37,12 -37,13 +37,14 @@@ import java.util.Set
  import java.util.concurrent.TimeUnit;
  import java.util.concurrent.locks.ReentrantLock;
  
- import com.google.common.collect.Iterables;
+ import com.google.common.annotations.VisibleForTesting;
  import org.apache.commons.lang.StringUtils;
 -
 +import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.fs.FSDataInputStream;
+ import org.apache.hadoop.hive.common.JavaUtils;
+ import org.apache.hadoop.hive.common.ValidReadTxnList;
  import org.apache.hadoop.hive.common.ValidTxnList;
 +import org.apache.hadoop.hive.common.ValidWriteIds;
  import org.apache.hadoop.hive.common.metrics.common.Metrics;
  import org.apache.hadoop.hive.common.metrics.common.MetricsConstant;
  import org.apache.hadoop.hive.common.metrics.common.MetricsFactory;
@@@ -71,15 -69,11 +73,12 @@@ import org.apache.hadoop.hive.ql.hooks.
  import org.apache.hadoop.hive.ql.hooks.Hook;
  import org.apache.hadoop.hive.ql.hooks.HookContext;
  import org.apache.hadoop.hive.ql.hooks.HookUtils;
- import org.apache.hadoop.hive.ql.hooks.MetricsQueryLifeTimeHook;
+ import org.apache.hadoop.hive.ql.hooks.HooksLoader;
  import org.apache.hadoop.hive.ql.hooks.PostExecute;
  import org.apache.hadoop.hive.ql.hooks.PreExecute;
- import org.apache.hadoop.hive.ql.hooks.QueryLifeTimeHook;
- import org.apache.hadoop.hive.ql.hooks.QueryLifeTimeHookContext;
- import org.apache.hadoop.hive.ql.hooks.QueryLifeTimeHookContextImpl;
  import org.apache.hadoop.hive.ql.hooks.ReadEntity;
  import org.apache.hadoop.hive.ql.hooks.WriteEntity;
 +import org.apache.hadoop.hive.ql.io.AcidUtils;
  import org.apache.hadoop.hive.ql.lockmgr.HiveLock;
  import org.apache.hadoop.hive.ql.lockmgr.HiveTxnManager;
  import org.apache.hadoop.hive.ql.lockmgr.LockException;
@@@ -748,7 -807,7 +812,7 @@@ public class Driver implements CommandP
      }
  
      // The following union operation returns a union, which traverses over the
--    // first set once and then  then over each element of second set, in order, 
++    // first set once and then over each element of second set, in order,
      // that is not contained in first. This means it doesn't replace anything
      // in first set, and would preserve the WriteType in WriteEntity in first
      // set in case of outputs list.
@@@ -1137,21 -1196,12 +1201,12 @@@
            desc.setStatementId(txnMgr.getWriteIdAndIncrement());
          }
        }
-       /*Note, we have to record snapshot after lock acquisition to prevent lost update problem
-       consider 2 concurrent "update table T set x = x + 1".  1st will get the locks and the
-       2nd will block until 1st one commits and only then lock in the snapshot, i.e. it will
-       see the changes made by 1st one.  This takes care of autoCommit=true case.
-       For multi-stmt txns this is not sufficient and will be managed via WriteSet tracking
-       in the lock manager.*/
 -      /*It's imperative that {@code acquireLocks()} is called for all commands so that 
++      /*It's imperative that {@code acquireLocks()} is called for all commands so that
+       HiveTxnManager can transition its state machine correctly*/
        txnMgr.acquireLocks(plan, ctx, userFromUGI, lDrvState);
-       if(initiatingTransaction || (readOnlyQueryInAutoCommit && acidInQuery)) {
-         //For multi-stmt txns we should record the snapshot when txn starts but
-         // don't update it after that until txn completes.  Thus the check for {@code initiatingTransaction}
-         //For autoCommit=true, Read-only statements, txn is implicit, i.e. lock in the snapshot
-         //for each statement.
+       if(txnMgr.recordSnapshot(plan)) {
          recordValidTxns();
        }
- 
        return 0;
      } catch (Exception e) {
        errorMessage = "FAILED: Error in acquiring locks: " + e.getMessage();

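The Driver.java hunk above preserves a specific ordering: txnMgr.acquireLocks() runs for every command so the HiveTxnManager state machine can transition correctly, and the snapshot of valid transactions is recorded only after the locks are held (and only when recordSnapshot(plan) says to), so a writer that blocked on a lock sees the commits of the writer that held it before. A hedged sketch of that ordering follows, using stand-in types rather than Hive's real classes.

    // Sketch of the lock-then-snapshot ordering (stand-in types, not Hive's Driver or HiveTxnManager).
    interface TxnManagerSketch {
      void acquireLocks(Object plan) throws Exception;
      boolean recordSnapshot(Object plan);
    }

    final class LockThenSnapshotSketch {
      /** Mirrors the shape of the hunk: 0 on success, a non-zero error code otherwise. */
      static int acquireLocksAndRecordSnapshot(TxnManagerSketch txnMgr, Object plan, Runnable recordValidTxns) {
        try {
          // Acquire locks for every command so the txn manager's state machine can transition.
          txnMgr.acquireLocks(plan);
          // Lock in the snapshot of valid txns only once the locks are held; a writer that
          // blocked here now sees the commits of whichever writer held the lock before it.
          if (txnMgr.recordSnapshot(plan)) {
            recordValidTxns.run();
          }
          return 0;
        } catch (Exception e) {
          System.err.println("FAILED: Error in acquiring locks: " + e.getMessage());
          return 1;
        }
      }

      public static void main(String[] args) {
        TxnManagerSketch txnMgr = new TxnManagerSketch() {
          public void acquireLocks(Object plan) { System.out.println("locks acquired"); }
          public boolean recordSnapshot(Object plan) { return true; }
        };
        int rc = acquireLocksAndRecordSnapshot(txnMgr, new Object(),
            () -> System.out.println("snapshot recorded"));
        System.out.println("return code: " + rc);
      }
    }
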
http://git-wip-us.apache.org/repos/asf/hive/blob/1ceaf357/ql/src/java/org/apache/hadoop/hive/ql/exec/CopyTask.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/1ceaf357/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/1ceaf357/ql/src/java/org/apache/hadoop/hive/ql/exec/FetchOperator.java
----------------------------------------------------------------------
diff --cc ql/src/java/org/apache/hadoop/hive/ql/exec/FetchOperator.java
index 4102d02,13750cd..a3e4c9f
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/FetchOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/FetchOperator.java
@@@ -737,8 -699,16 +741,20 @@@ public class FetchOperator implements S
        return inputFormat.getRecordReader(getInputSplit(), job, Reporter.NULL);
      }
    }
 -  
 +
+   private static class FetchInputFormatSplitComparator implements Comparator<FetchInputFormatSplit> {
+     @Override
+     public int compare(FetchInputFormatSplit a, FetchInputFormatSplit b) {
+       final Path ap = a.getPath();
+       final Path bp = b.getPath();
+       if (ap != null) {
+         return (ap.compareTo(bp));
+       }
+       return Long.signum(a.getLength() - b.getLength());
+     }
+   }
++
 +  public Configuration getJobConf() {
 +    return job;
 +  }
  }

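The FetchOperator.java hunk above adds a comparator so fetch splits are processed in a deterministic order: by path when paths are available, otherwise by length. Below is a small, self-contained illustration of that ordering with stand-in types (not Hive's FetchInputFormatSplit), simplified to compare paths only when both splits have one.

    // Illustration of the split ordering added above (stand-in types, not Hive's classes).
    import java.util.ArrayList;
    import java.util.Comparator;
    import java.util.List;

    public class SplitOrderingSketch {
      static final class Split {
        final String path;   // may be null for synthetic splits
        final long length;
        Split(String path, long length) { this.path = path; this.length = length; }
        @Override public String toString() { return path != null ? path : "<no path, len=" + length + ">"; }
      }

      static final Comparator<Split> BY_PATH_THEN_LENGTH = (a, b) -> {
        if (a.path != null && b.path != null) {
          return a.path.compareTo(b.path);       // deterministic order when both splits have a path
        }
        return Long.signum(a.length - b.length); // otherwise fall back to split length
      };

      public static void main(String[] args) {
        List<Split> splits = new ArrayList<>();
        splits.add(new Split("s3a://bucket/tbl/000001_0", 10));
        splits.add(new Split("s3a://bucket/tbl/000000_0", 99));
        splits.add(new Split(null, 5));
        splits.sort(BY_PATH_THEN_LENGTH);
        // Prints: [<no path, len=5>, s3a://bucket/tbl/000000_0, s3a://bucket/tbl/000001_0]
        System.out.println(splits);
      }
    }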

[45/50] [abbrv] hive git commit: HIVE-14671 : merge master into hive-14535 (Wei Zheng)

Posted by we...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/1ceaf357/metastore/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php
----------------------------------------------------------------------
diff --cc metastore/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php
index 4997c51,4fb7183..a750a1c
--- a/metastore/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php
+++ b/metastore/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php
@@@ -11256,14 -11092,14 +11316,14 @@@ class ThriftHiveMetastore_get_databases
          case 0:
            if ($ftype == TType::LST) {
              $this->success = array();
-             $_size604 = 0;
-             $_etype607 = 0;
-             $xfer += $input->readListBegin($_etype607, $_size604);
-             for ($_i608 = 0; $_i608 < $_size604; ++$_i608)
 -            $_size596 = 0;
 -            $_etype599 = 0;
 -            $xfer += $input->readListBegin($_etype599, $_size596);
 -            for ($_i600 = 0; $_i600 < $_size596; ++$_i600)
++            $_size603 = 0;
++            $_etype606 = 0;
++            $xfer += $input->readListBegin($_etype606, $_size603);
++            for ($_i607 = 0; $_i607 < $_size603; ++$_i607)
              {
-               $elem609 = null;
-               $xfer += $input->readString($elem609);
-               $this->success []= $elem609;
 -              $elem601 = null;
 -              $xfer += $input->readString($elem601);
 -              $this->success []= $elem601;
++              $elem608 = null;
++              $xfer += $input->readString($elem608);
++              $this->success []= $elem608;
              }
              $xfer += $input->readListEnd();
            } else {
@@@ -11299,9 -11135,9 +11359,9 @@@
        {
          $output->writeListBegin(TType::STRING, count($this->success));
          {
-           foreach ($this->success as $iter610)
 -          foreach ($this->success as $iter602)
++          foreach ($this->success as $iter609)
            {
-             $xfer += $output->writeString($iter610);
 -            $xfer += $output->writeString($iter602);
++            $xfer += $output->writeString($iter609);
            }
          }
          $output->writeListEnd();
@@@ -11432,14 -11268,14 +11492,14 @@@ class ThriftHiveMetastore_get_all_datab
          case 0:
            if ($ftype == TType::LST) {
              $this->success = array();
-             $_size611 = 0;
-             $_etype614 = 0;
-             $xfer += $input->readListBegin($_etype614, $_size611);
-             for ($_i615 = 0; $_i615 < $_size611; ++$_i615)
 -            $_size603 = 0;
 -            $_etype606 = 0;
 -            $xfer += $input->readListBegin($_etype606, $_size603);
 -            for ($_i607 = 0; $_i607 < $_size603; ++$_i607)
++            $_size610 = 0;
++            $_etype613 = 0;
++            $xfer += $input->readListBegin($_etype613, $_size610);
++            for ($_i614 = 0; $_i614 < $_size610; ++$_i614)
              {
-               $elem616 = null;
-               $xfer += $input->readString($elem616);
-               $this->success []= $elem616;
 -              $elem608 = null;
 -              $xfer += $input->readString($elem608);
 -              $this->success []= $elem608;
++              $elem615 = null;
++              $xfer += $input->readString($elem615);
++              $this->success []= $elem615;
              }
              $xfer += $input->readListEnd();
            } else {
@@@ -11475,9 -11311,9 +11535,9 @@@
        {
          $output->writeListBegin(TType::STRING, count($this->success));
          {
-           foreach ($this->success as $iter617)
 -          foreach ($this->success as $iter609)
++          foreach ($this->success as $iter616)
            {
-             $xfer += $output->writeString($iter617);
 -            $xfer += $output->writeString($iter609);
++            $xfer += $output->writeString($iter616);
            }
          }
          $output->writeListEnd();
@@@ -12478,18 -12314,18 +12538,18 @@@ class ThriftHiveMetastore_get_type_all_
          case 0:
            if ($ftype == TType::MAP) {
              $this->success = array();
-             $_size618 = 0;
-             $_ktype619 = 0;
-             $_vtype620 = 0;
-             $xfer += $input->readMapBegin($_ktype619, $_vtype620, $_size618);
-             for ($_i622 = 0; $_i622 < $_size618; ++$_i622)
 -            $_size610 = 0;
 -            $_ktype611 = 0;
 -            $_vtype612 = 0;
 -            $xfer += $input->readMapBegin($_ktype611, $_vtype612, $_size610);
 -            for ($_i614 = 0; $_i614 < $_size610; ++$_i614)
++            $_size617 = 0;
++            $_ktype618 = 0;
++            $_vtype619 = 0;
++            $xfer += $input->readMapBegin($_ktype618, $_vtype619, $_size617);
++            for ($_i621 = 0; $_i621 < $_size617; ++$_i621)
              {
-               $key623 = '';
-               $val624 = new \metastore\Type();
-               $xfer += $input->readString($key623);
-               $val624 = new \metastore\Type();
-               $xfer += $val624->read($input);
-               $this->success[$key623] = $val624;
 -              $key615 = '';
 -              $val616 = new \metastore\Type();
 -              $xfer += $input->readString($key615);
 -              $val616 = new \metastore\Type();
 -              $xfer += $val616->read($input);
 -              $this->success[$key615] = $val616;
++              $key622 = '';
++              $val623 = new \metastore\Type();
++              $xfer += $input->readString($key622);
++              $val623 = new \metastore\Type();
++              $xfer += $val623->read($input);
++              $this->success[$key622] = $val623;
              }
              $xfer += $input->readMapEnd();
            } else {
@@@ -12525,10 -12361,10 +12585,10 @@@
        {
          $output->writeMapBegin(TType::STRING, TType::STRUCT, count($this->success));
          {
-           foreach ($this->success as $kiter625 => $viter626)
 -          foreach ($this->success as $kiter617 => $viter618)
++          foreach ($this->success as $kiter624 => $viter625)
            {
-             $xfer += $output->writeString($kiter625);
-             $xfer += $viter626->write($output);
 -            $xfer += $output->writeString($kiter617);
 -            $xfer += $viter618->write($output);
++            $xfer += $output->writeString($kiter624);
++            $xfer += $viter625->write($output);
            }
          }
          $output->writeMapEnd();
@@@ -12732,15 -12568,15 +12792,15 @@@ class ThriftHiveMetastore_get_fields_re
          case 0:
            if ($ftype == TType::LST) {
              $this->success = array();
-             $_size627 = 0;
-             $_etype630 = 0;
-             $xfer += $input->readListBegin($_etype630, $_size627);
-             for ($_i631 = 0; $_i631 < $_size627; ++$_i631)
 -            $_size619 = 0;
 -            $_etype622 = 0;
 -            $xfer += $input->readListBegin($_etype622, $_size619);
 -            for ($_i623 = 0; $_i623 < $_size619; ++$_i623)
++            $_size626 = 0;
++            $_etype629 = 0;
++            $xfer += $input->readListBegin($_etype629, $_size626);
++            for ($_i630 = 0; $_i630 < $_size626; ++$_i630)
              {
-               $elem632 = null;
-               $elem632 = new \metastore\FieldSchema();
-               $xfer += $elem632->read($input);
-               $this->success []= $elem632;
 -              $elem624 = null;
 -              $elem624 = new \metastore\FieldSchema();
 -              $xfer += $elem624->read($input);
 -              $this->success []= $elem624;
++              $elem631 = null;
++              $elem631 = new \metastore\FieldSchema();
++              $xfer += $elem631->read($input);
++              $this->success []= $elem631;
              }
              $xfer += $input->readListEnd();
            } else {
@@@ -12792,9 -12628,9 +12852,9 @@@
        {
          $output->writeListBegin(TType::STRUCT, count($this->success));
          {
-           foreach ($this->success as $iter633)
 -          foreach ($this->success as $iter625)
++          foreach ($this->success as $iter632)
            {
-             $xfer += $iter633->write($output);
 -            $xfer += $iter625->write($output);
++            $xfer += $iter632->write($output);
            }
          }
          $output->writeListEnd();
@@@ -13036,15 -12872,15 +13096,15 @@@ class ThriftHiveMetastore_get_fields_wi
          case 0:
            if ($ftype == TType::LST) {
              $this->success = array();
-             $_size634 = 0;
-             $_etype637 = 0;
-             $xfer += $input->readListBegin($_etype637, $_size634);
-             for ($_i638 = 0; $_i638 < $_size634; ++$_i638)
 -            $_size626 = 0;
 -            $_etype629 = 0;
 -            $xfer += $input->readListBegin($_etype629, $_size626);
 -            for ($_i630 = 0; $_i630 < $_size626; ++$_i630)
++            $_size633 = 0;
++            $_etype636 = 0;
++            $xfer += $input->readListBegin($_etype636, $_size633);
++            for ($_i637 = 0; $_i637 < $_size633; ++$_i637)
              {
-               $elem639 = null;
-               $elem639 = new \metastore\FieldSchema();
-               $xfer += $elem639->read($input);
-               $this->success []= $elem639;
 -              $elem631 = null;
 -              $elem631 = new \metastore\FieldSchema();
 -              $xfer += $elem631->read($input);
 -              $this->success []= $elem631;
++              $elem638 = null;
++              $elem638 = new \metastore\FieldSchema();
++              $xfer += $elem638->read($input);
++              $this->success []= $elem638;
              }
              $xfer += $input->readListEnd();
            } else {
@@@ -13096,9 -12932,9 +13156,9 @@@
        {
          $output->writeListBegin(TType::STRUCT, count($this->success));
          {
-           foreach ($this->success as $iter640)
 -          foreach ($this->success as $iter632)
++          foreach ($this->success as $iter639)
            {
-             $xfer += $iter640->write($output);
 -            $xfer += $iter632->write($output);
++            $xfer += $iter639->write($output);
            }
          }
          $output->writeListEnd();
@@@ -13312,15 -13148,15 +13372,15 @@@ class ThriftHiveMetastore_get_schema_re
          case 0:
            if ($ftype == TType::LST) {
              $this->success = array();
-             $_size641 = 0;
-             $_etype644 = 0;
-             $xfer += $input->readListBegin($_etype644, $_size641);
-             for ($_i645 = 0; $_i645 < $_size641; ++$_i645)
 -            $_size633 = 0;
 -            $_etype636 = 0;
 -            $xfer += $input->readListBegin($_etype636, $_size633);
 -            for ($_i637 = 0; $_i637 < $_size633; ++$_i637)
++            $_size640 = 0;
++            $_etype643 = 0;
++            $xfer += $input->readListBegin($_etype643, $_size640);
++            for ($_i644 = 0; $_i644 < $_size640; ++$_i644)
              {
-               $elem646 = null;
-               $elem646 = new \metastore\FieldSchema();
-               $xfer += $elem646->read($input);
-               $this->success []= $elem646;
 -              $elem638 = null;
 -              $elem638 = new \metastore\FieldSchema();
 -              $xfer += $elem638->read($input);
 -              $this->success []= $elem638;
++              $elem645 = null;
++              $elem645 = new \metastore\FieldSchema();
++              $xfer += $elem645->read($input);
++              $this->success []= $elem645;
              }
              $xfer += $input->readListEnd();
            } else {
@@@ -13372,9 -13208,9 +13432,9 @@@
        {
          $output->writeListBegin(TType::STRUCT, count($this->success));
          {
-           foreach ($this->success as $iter647)
 -          foreach ($this->success as $iter639)
++          foreach ($this->success as $iter646)
            {
-             $xfer += $iter647->write($output);
 -            $xfer += $iter639->write($output);
++            $xfer += $iter646->write($output);
            }
          }
          $output->writeListEnd();
@@@ -13616,15 -13452,15 +13676,15 @@@ class ThriftHiveMetastore_get_schema_wi
          case 0:
            if ($ftype == TType::LST) {
              $this->success = array();
-             $_size648 = 0;
-             $_etype651 = 0;
-             $xfer += $input->readListBegin($_etype651, $_size648);
-             for ($_i652 = 0; $_i652 < $_size648; ++$_i652)
 -            $_size640 = 0;
 -            $_etype643 = 0;
 -            $xfer += $input->readListBegin($_etype643, $_size640);
 -            for ($_i644 = 0; $_i644 < $_size640; ++$_i644)
++            $_size647 = 0;
++            $_etype650 = 0;
++            $xfer += $input->readListBegin($_etype650, $_size647);
++            for ($_i651 = 0; $_i651 < $_size647; ++$_i651)
              {
-               $elem653 = null;
-               $elem653 = new \metastore\FieldSchema();
-               $xfer += $elem653->read($input);
-               $this->success []= $elem653;
 -              $elem645 = null;
 -              $elem645 = new \metastore\FieldSchema();
 -              $xfer += $elem645->read($input);
 -              $this->success []= $elem645;
++              $elem652 = null;
++              $elem652 = new \metastore\FieldSchema();
++              $xfer += $elem652->read($input);
++              $this->success []= $elem652;
              }
              $xfer += $input->readListEnd();
            } else {
@@@ -13676,9 -13512,9 +13736,9 @@@
        {
          $output->writeListBegin(TType::STRUCT, count($this->success));
          {
-           foreach ($this->success as $iter654)
 -          foreach ($this->success as $iter646)
++          foreach ($this->success as $iter653)
            {
-             $xfer += $iter654->write($output);
 -            $xfer += $iter646->write($output);
++            $xfer += $iter653->write($output);
            }
          }
          $output->writeListEnd();
@@@ -14286,15 -14122,15 +14346,15 @@@ class ThriftHiveMetastore_create_table_
          case 2:
            if ($ftype == TType::LST) {
              $this->primaryKeys = array();
-             $_size655 = 0;
-             $_etype658 = 0;
-             $xfer += $input->readListBegin($_etype658, $_size655);
-             for ($_i659 = 0; $_i659 < $_size655; ++$_i659)
 -            $_size647 = 0;
 -            $_etype650 = 0;
 -            $xfer += $input->readListBegin($_etype650, $_size647);
 -            for ($_i651 = 0; $_i651 < $_size647; ++$_i651)
++            $_size654 = 0;
++            $_etype657 = 0;
++            $xfer += $input->readListBegin($_etype657, $_size654);
++            for ($_i658 = 0; $_i658 < $_size654; ++$_i658)
              {
-               $elem660 = null;
-               $elem660 = new \metastore\SQLPrimaryKey();
-               $xfer += $elem660->read($input);
-               $this->primaryKeys []= $elem660;
 -              $elem652 = null;
 -              $elem652 = new \metastore\SQLPrimaryKey();
 -              $xfer += $elem652->read($input);
 -              $this->primaryKeys []= $elem652;
++              $elem659 = null;
++              $elem659 = new \metastore\SQLPrimaryKey();
++              $xfer += $elem659->read($input);
++              $this->primaryKeys []= $elem659;
              }
              $xfer += $input->readListEnd();
            } else {
@@@ -14304,15 -14140,15 +14364,15 @@@
          case 3:
            if ($ftype == TType::LST) {
              $this->foreignKeys = array();
-             $_size661 = 0;
-             $_etype664 = 0;
-             $xfer += $input->readListBegin($_etype664, $_size661);
-             for ($_i665 = 0; $_i665 < $_size661; ++$_i665)
 -            $_size653 = 0;
 -            $_etype656 = 0;
 -            $xfer += $input->readListBegin($_etype656, $_size653);
 -            for ($_i657 = 0; $_i657 < $_size653; ++$_i657)
++            $_size660 = 0;
++            $_etype663 = 0;
++            $xfer += $input->readListBegin($_etype663, $_size660);
++            for ($_i664 = 0; $_i664 < $_size660; ++$_i664)
              {
-               $elem666 = null;
-               $elem666 = new \metastore\SQLForeignKey();
-               $xfer += $elem666->read($input);
-               $this->foreignKeys []= $elem666;
 -              $elem658 = null;
 -              $elem658 = new \metastore\SQLForeignKey();
 -              $xfer += $elem658->read($input);
 -              $this->foreignKeys []= $elem658;
++              $elem665 = null;
++              $elem665 = new \metastore\SQLForeignKey();
++              $xfer += $elem665->read($input);
++              $this->foreignKeys []= $elem665;
              }
              $xfer += $input->readListEnd();
            } else {
@@@ -14348,9 -14184,9 +14408,9 @@@
        {
          $output->writeListBegin(TType::STRUCT, count($this->primaryKeys));
          {
-           foreach ($this->primaryKeys as $iter667)
 -          foreach ($this->primaryKeys as $iter659)
++          foreach ($this->primaryKeys as $iter666)
            {
-             $xfer += $iter667->write($output);
 -            $xfer += $iter659->write($output);
++            $xfer += $iter666->write($output);
            }
          }
          $output->writeListEnd();
@@@ -14365,9 -14201,9 +14425,9 @@@
        {
          $output->writeListBegin(TType::STRUCT, count($this->foreignKeys));
          {
-           foreach ($this->foreignKeys as $iter668)
 -          foreach ($this->foreignKeys as $iter660)
++          foreach ($this->foreignKeys as $iter667)
            {
-             $xfer += $iter668->write($output);
 -            $xfer += $iter660->write($output);
++            $xfer += $iter667->write($output);
            }
          }
          $output->writeListEnd();
@@@ -15553,6 -15389,230 +15613,230 @@@ class ThriftHiveMetastore_drop_table_wi
  
  }
  
+ class ThriftHiveMetastore_truncate_table_args {
+   static $_TSPEC;
+ 
+   /**
+    * @var string
+    */
+   public $dbName = null;
+   /**
+    * @var string
+    */
+   public $tableName = null;
+   /**
+    * @var string[]
+    */
+   public $partNames = null;
+ 
+   public function __construct($vals=null) {
+     if (!isset(self::$_TSPEC)) {
+       self::$_TSPEC = array(
+         1 => array(
+           'var' => 'dbName',
+           'type' => TType::STRING,
+           ),
+         2 => array(
+           'var' => 'tableName',
+           'type' => TType::STRING,
+           ),
+         3 => array(
+           'var' => 'partNames',
+           'type' => TType::LST,
+           'etype' => TType::STRING,
+           'elem' => array(
+             'type' => TType::STRING,
+             ),
+           ),
+         );
+     }
+     if (is_array($vals)) {
+       if (isset($vals['dbName'])) {
+         $this->dbName = $vals['dbName'];
+       }
+       if (isset($vals['tableName'])) {
+         $this->tableName = $vals['tableName'];
+       }
+       if (isset($vals['partNames'])) {
+         $this->partNames = $vals['partNames'];
+       }
+     }
+   }
+ 
+   public function getName() {
+     return 'ThriftHiveMetastore_truncate_table_args';
+   }
+ 
+   public function read($input)
+   {
+     $xfer = 0;
+     $fname = null;
+     $ftype = 0;
+     $fid = 0;
+     $xfer += $input->readStructBegin($fname);
+     while (true)
+     {
+       $xfer += $input->readFieldBegin($fname, $ftype, $fid);
+       if ($ftype == TType::STOP) {
+         break;
+       }
+       switch ($fid)
+       {
+         case 1:
+           if ($ftype == TType::STRING) {
+             $xfer += $input->readString($this->dbName);
+           } else {
+             $xfer += $input->skip($ftype);
+           }
+           break;
+         case 2:
+           if ($ftype == TType::STRING) {
+             $xfer += $input->readString($this->tableName);
+           } else {
+             $xfer += $input->skip($ftype);
+           }
+           break;
+         case 3:
+           if ($ftype == TType::LST) {
+             $this->partNames = array();
 -            $_size661 = 0;
 -            $_etype664 = 0;
 -            $xfer += $input->readListBegin($_etype664, $_size661);
 -            for ($_i665 = 0; $_i665 < $_size661; ++$_i665)
++            $_size668 = 0;
++            $_etype671 = 0;
++            $xfer += $input->readListBegin($_etype671, $_size668);
++            for ($_i672 = 0; $_i672 < $_size668; ++$_i672)
+             {
 -              $elem666 = null;
 -              $xfer += $input->readString($elem666);
 -              $this->partNames []= $elem666;
++              $elem673 = null;
++              $xfer += $input->readString($elem673);
++              $this->partNames []= $elem673;
+             }
+             $xfer += $input->readListEnd();
+           } else {
+             $xfer += $input->skip($ftype);
+           }
+           break;
+         default:
+           $xfer += $input->skip($ftype);
+           break;
+       }
+       $xfer += $input->readFieldEnd();
+     }
+     $xfer += $input->readStructEnd();
+     return $xfer;
+   }
+ 
+   public function write($output) {
+     $xfer = 0;
+     $xfer += $output->writeStructBegin('ThriftHiveMetastore_truncate_table_args');
+     if ($this->dbName !== null) {
+       $xfer += $output->writeFieldBegin('dbName', TType::STRING, 1);
+       $xfer += $output->writeString($this->dbName);
+       $xfer += $output->writeFieldEnd();
+     }
+     if ($this->tableName !== null) {
+       $xfer += $output->writeFieldBegin('tableName', TType::STRING, 2);
+       $xfer += $output->writeString($this->tableName);
+       $xfer += $output->writeFieldEnd();
+     }
+     if ($this->partNames !== null) {
+       if (!is_array($this->partNames)) {
+         throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA);
+       }
+       $xfer += $output->writeFieldBegin('partNames', TType::LST, 3);
+       {
+         $output->writeListBegin(TType::STRING, count($this->partNames));
+         {
 -          foreach ($this->partNames as $iter667)
++          foreach ($this->partNames as $iter674)
+           {
 -            $xfer += $output->writeString($iter667);
++            $xfer += $output->writeString($iter674);
+           }
+         }
+         $output->writeListEnd();
+       }
+       $xfer += $output->writeFieldEnd();
+     }
+     $xfer += $output->writeFieldStop();
+     $xfer += $output->writeStructEnd();
+     return $xfer;
+   }
+ 
+ }
+ 
+ class ThriftHiveMetastore_truncate_table_result {
+   static $_TSPEC;
+ 
+   /**
+    * @var \metastore\MetaException
+    */
+   public $o1 = null;
+ 
+   public function __construct($vals=null) {
+     if (!isset(self::$_TSPEC)) {
+       self::$_TSPEC = array(
+         1 => array(
+           'var' => 'o1',
+           'type' => TType::STRUCT,
+           'class' => '\metastore\MetaException',
+           ),
+         );
+     }
+     if (is_array($vals)) {
+       if (isset($vals['o1'])) {
+         $this->o1 = $vals['o1'];
+       }
+     }
+   }
+ 
+   public function getName() {
+     return 'ThriftHiveMetastore_truncate_table_result';
+   }
+ 
+   public function read($input)
+   {
+     $xfer = 0;
+     $fname = null;
+     $ftype = 0;
+     $fid = 0;
+     $xfer += $input->readStructBegin($fname);
+     while (true)
+     {
+       $xfer += $input->readFieldBegin($fname, $ftype, $fid);
+       if ($ftype == TType::STOP) {
+         break;
+       }
+       switch ($fid)
+       {
+         case 1:
+           if ($ftype == TType::STRUCT) {
+             $this->o1 = new \metastore\MetaException();
+             $xfer += $this->o1->read($input);
+           } else {
+             $xfer += $input->skip($ftype);
+           }
+           break;
+         default:
+           $xfer += $input->skip($ftype);
+           break;
+       }
+       $xfer += $input->readFieldEnd();
+     }
+     $xfer += $input->readStructEnd();
+     return $xfer;
+   }
+ 
+   public function write($output) {
+     $xfer = 0;
+     $xfer += $output->writeStructBegin('ThriftHiveMetastore_truncate_table_result');
+     if ($this->o1 !== null) {
+       $xfer += $output->writeFieldBegin('o1', TType::STRUCT, 1);
+       $xfer += $this->o1->write($output);
+       $xfer += $output->writeFieldEnd();
+     }
+     $xfer += $output->writeFieldStop();
+     $xfer += $output->writeStructEnd();
+     return $xfer;
+   }
+ 
+ }
+ 
  class ThriftHiveMetastore_get_tables_args {
    static $_TSPEC;
  
@@@ -15713,14 -15773,14 +15997,14 @@@ class ThriftHiveMetastore_get_tables_re
          case 0:
            if ($ftype == TType::LST) {
              $this->success = array();
-             $_size669 = 0;
-             $_etype672 = 0;
-             $xfer += $input->readListBegin($_etype672, $_size669);
-             for ($_i673 = 0; $_i673 < $_size669; ++$_i673)
 -            $_size668 = 0;
 -            $_etype671 = 0;
 -            $xfer += $input->readListBegin($_etype671, $_size668);
 -            for ($_i672 = 0; $_i672 < $_size668; ++$_i672)
++            $_size675 = 0;
++            $_etype678 = 0;
++            $xfer += $input->readListBegin($_etype678, $_size675);
++            for ($_i679 = 0; $_i679 < $_size675; ++$_i679)
              {
-               $elem674 = null;
-               $xfer += $input->readString($elem674);
-               $this->success []= $elem674;
 -              $elem673 = null;
 -              $xfer += $input->readString($elem673);
 -              $this->success []= $elem673;
++              $elem680 = null;
++              $xfer += $input->readString($elem680);
++              $this->success []= $elem680;
              }
              $xfer += $input->readListEnd();
            } else {
@@@ -15756,9 -15816,9 +16040,9 @@@
        {
          $output->writeListBegin(TType::STRING, count($this->success));
          {
-           foreach ($this->success as $iter675)
 -          foreach ($this->success as $iter674)
++          foreach ($this->success as $iter681)
            {
-             $xfer += $output->writeString($iter675);
 -            $xfer += $output->writeString($iter674);
++            $xfer += $output->writeString($iter681);
            }
          }
          $output->writeListEnd();
@@@ -15960,14 -16020,14 +16244,14 @@@ class ThriftHiveMetastore_get_tables_by
          case 0:
            if ($ftype == TType::LST) {
              $this->success = array();
-             $_size676 = 0;
-             $_etype679 = 0;
-             $xfer += $input->readListBegin($_etype679, $_size676);
-             for ($_i680 = 0; $_i680 < $_size676; ++$_i680)
 -            $_size675 = 0;
 -            $_etype678 = 0;
 -            $xfer += $input->readListBegin($_etype678, $_size675);
 -            for ($_i679 = 0; $_i679 < $_size675; ++$_i679)
++            $_size682 = 0;
++            $_etype685 = 0;
++            $xfer += $input->readListBegin($_etype685, $_size682);
++            for ($_i686 = 0; $_i686 < $_size682; ++$_i686)
              {
-               $elem681 = null;
-               $xfer += $input->readString($elem681);
-               $this->success []= $elem681;
 -              $elem680 = null;
 -              $xfer += $input->readString($elem680);
 -              $this->success []= $elem680;
++              $elem687 = null;
++              $xfer += $input->readString($elem687);
++              $this->success []= $elem687;
              }
              $xfer += $input->readListEnd();
            } else {
@@@ -16003,9 -16063,9 +16287,9 @@@
        {
          $output->writeListBegin(TType::STRING, count($this->success));
          {
-           foreach ($this->success as $iter682)
 -          foreach ($this->success as $iter681)
++          foreach ($this->success as $iter688)
            {
-             $xfer += $output->writeString($iter682);
 -            $xfer += $output->writeString($iter681);
++            $xfer += $output->writeString($iter688);
            }
          }
          $output->writeListEnd();
@@@ -16110,14 -16170,14 +16394,14 @@@ class ThriftHiveMetastore_get_table_met
          case 3:
            if ($ftype == TType::LST) {
              $this->tbl_types = array();
-             $_size683 = 0;
-             $_etype686 = 0;
-             $xfer += $input->readListBegin($_etype686, $_size683);
-             for ($_i687 = 0; $_i687 < $_size683; ++$_i687)
 -            $_size682 = 0;
 -            $_etype685 = 0;
 -            $xfer += $input->readListBegin($_etype685, $_size682);
 -            for ($_i686 = 0; $_i686 < $_size682; ++$_i686)
++            $_size689 = 0;
++            $_etype692 = 0;
++            $xfer += $input->readListBegin($_etype692, $_size689);
++            for ($_i693 = 0; $_i693 < $_size689; ++$_i693)
              {
-               $elem688 = null;
-               $xfer += $input->readString($elem688);
-               $this->tbl_types []= $elem688;
 -              $elem687 = null;
 -              $xfer += $input->readString($elem687);
 -              $this->tbl_types []= $elem687;
++              $elem694 = null;
++              $xfer += $input->readString($elem694);
++              $this->tbl_types []= $elem694;
              }
              $xfer += $input->readListEnd();
            } else {
@@@ -16155,9 -16215,9 +16439,9 @@@
        {
          $output->writeListBegin(TType::STRING, count($this->tbl_types));
          {
-           foreach ($this->tbl_types as $iter689)
 -          foreach ($this->tbl_types as $iter688)
++          foreach ($this->tbl_types as $iter695)
            {
-             $xfer += $output->writeString($iter689);
 -            $xfer += $output->writeString($iter688);
++            $xfer += $output->writeString($iter695);
            }
          }
          $output->writeListEnd();
@@@ -16234,15 -16294,15 +16518,15 @@@ class ThriftHiveMetastore_get_table_met
          case 0:
            if ($ftype == TType::LST) {
              $this->success = array();
-             $_size690 = 0;
-             $_etype693 = 0;
-             $xfer += $input->readListBegin($_etype693, $_size690);
-             for ($_i694 = 0; $_i694 < $_size690; ++$_i694)
 -            $_size689 = 0;
 -            $_etype692 = 0;
 -            $xfer += $input->readListBegin($_etype692, $_size689);
 -            for ($_i693 = 0; $_i693 < $_size689; ++$_i693)
++            $_size696 = 0;
++            $_etype699 = 0;
++            $xfer += $input->readListBegin($_etype699, $_size696);
++            for ($_i700 = 0; $_i700 < $_size696; ++$_i700)
              {
-               $elem695 = null;
-               $elem695 = new \metastore\TableMeta();
-               $xfer += $elem695->read($input);
-               $this->success []= $elem695;
 -              $elem694 = null;
 -              $elem694 = new \metastore\TableMeta();
 -              $xfer += $elem694->read($input);
 -              $this->success []= $elem694;
++              $elem701 = null;
++              $elem701 = new \metastore\TableMeta();
++              $xfer += $elem701->read($input);
++              $this->success []= $elem701;
              }
              $xfer += $input->readListEnd();
            } else {
@@@ -16278,9 -16338,9 +16562,9 @@@
        {
          $output->writeListBegin(TType::STRUCT, count($this->success));
          {
-           foreach ($this->success as $iter696)
 -          foreach ($this->success as $iter695)
++          foreach ($this->success as $iter702)
            {
-             $xfer += $iter696->write($output);
 -            $xfer += $iter695->write($output);
++            $xfer += $iter702->write($output);
            }
          }
          $output->writeListEnd();
@@@ -16436,14 -16496,14 +16720,14 @@@ class ThriftHiveMetastore_get_all_table
          case 0:
            if ($ftype == TType::LST) {
              $this->success = array();
-             $_size697 = 0;
-             $_etype700 = 0;
-             $xfer += $input->readListBegin($_etype700, $_size697);
-             for ($_i701 = 0; $_i701 < $_size697; ++$_i701)
 -            $_size696 = 0;
 -            $_etype699 = 0;
 -            $xfer += $input->readListBegin($_etype699, $_size696);
 -            for ($_i700 = 0; $_i700 < $_size696; ++$_i700)
++            $_size703 = 0;
++            $_etype706 = 0;
++            $xfer += $input->readListBegin($_etype706, $_size703);
++            for ($_i707 = 0; $_i707 < $_size703; ++$_i707)
              {
-               $elem702 = null;
-               $xfer += $input->readString($elem702);
-               $this->success []= $elem702;
 -              $elem701 = null;
 -              $xfer += $input->readString($elem701);
 -              $this->success []= $elem701;
++              $elem708 = null;
++              $xfer += $input->readString($elem708);
++              $this->success []= $elem708;
              }
              $xfer += $input->readListEnd();
            } else {
@@@ -16479,9 -16539,9 +16763,9 @@@
        {
          $output->writeListBegin(TType::STRING, count($this->success));
          {
-           foreach ($this->success as $iter703)
 -          foreach ($this->success as $iter702)
++          foreach ($this->success as $iter709)
            {
-             $xfer += $output->writeString($iter703);
 -            $xfer += $output->writeString($iter702);
++            $xfer += $output->writeString($iter709);
            }
          }
          $output->writeListEnd();
@@@ -16796,14 -16856,14 +17080,14 @@@ class ThriftHiveMetastore_get_table_obj
          case 2:
            if ($ftype == TType::LST) {
              $this->tbl_names = array();
-             $_size704 = 0;
-             $_etype707 = 0;
-             $xfer += $input->readListBegin($_etype707, $_size704);
-             for ($_i708 = 0; $_i708 < $_size704; ++$_i708)
 -            $_size703 = 0;
 -            $_etype706 = 0;
 -            $xfer += $input->readListBegin($_etype706, $_size703);
 -            for ($_i707 = 0; $_i707 < $_size703; ++$_i707)
++            $_size710 = 0;
++            $_etype713 = 0;
++            $xfer += $input->readListBegin($_etype713, $_size710);
++            for ($_i714 = 0; $_i714 < $_size710; ++$_i714)
              {
-               $elem709 = null;
-               $xfer += $input->readString($elem709);
-               $this->tbl_names []= $elem709;
 -              $elem708 = null;
 -              $xfer += $input->readString($elem708);
 -              $this->tbl_names []= $elem708;
++              $elem715 = null;
++              $xfer += $input->readString($elem715);
++              $this->tbl_names []= $elem715;
              }
              $xfer += $input->readListEnd();
            } else {
@@@ -16836,9 -16896,9 +17120,9 @@@
        {
          $output->writeListBegin(TType::STRING, count($this->tbl_names));
          {
-           foreach ($this->tbl_names as $iter710)
 -          foreach ($this->tbl_names as $iter709)
++          foreach ($this->tbl_names as $iter716)
            {
-             $xfer += $output->writeString($iter710);
 -            $xfer += $output->writeString($iter709);
++            $xfer += $output->writeString($iter716);
            }
          }
          $output->writeListEnd();
@@@ -16903,15 -16963,15 +17187,15 @@@ class ThriftHiveMetastore_get_table_obj
          case 0:
            if ($ftype == TType::LST) {
              $this->success = array();
-             $_size711 = 0;
-             $_etype714 = 0;
-             $xfer += $input->readListBegin($_etype714, $_size711);
-             for ($_i715 = 0; $_i715 < $_size711; ++$_i715)
 -            $_size710 = 0;
 -            $_etype713 = 0;
 -            $xfer += $input->readListBegin($_etype713, $_size710);
 -            for ($_i714 = 0; $_i714 < $_size710; ++$_i714)
++            $_size717 = 0;
++            $_etype720 = 0;
++            $xfer += $input->readListBegin($_etype720, $_size717);
++            for ($_i721 = 0; $_i721 < $_size717; ++$_i721)
              {
-               $elem716 = null;
-               $elem716 = new \metastore\Table();
-               $xfer += $elem716->read($input);
-               $this->success []= $elem716;
 -              $elem715 = null;
 -              $elem715 = new \metastore\Table();
 -              $xfer += $elem715->read($input);
 -              $this->success []= $elem715;
++              $elem722 = null;
++              $elem722 = new \metastore\Table();
++              $xfer += $elem722->read($input);
++              $this->success []= $elem722;
              }
              $xfer += $input->readListEnd();
            } else {
@@@ -16939,9 -16999,9 +17223,9 @@@
        {
          $output->writeListBegin(TType::STRUCT, count($this->success));
          {
-           foreach ($this->success as $iter717)
 -          foreach ($this->success as $iter716)
++          foreach ($this->success as $iter723)
            {
-             $xfer += $iter717->write($output);
 -            $xfer += $iter716->write($output);
++            $xfer += $iter723->write($output);
            }
          }
          $output->writeListEnd();
@@@ -17607,14 -17667,14 +17891,14 @@@ class ThriftHiveMetastore_get_table_nam
          case 0:
            if ($ftype == TType::LST) {
              $this->success = array();
-             $_size718 = 0;
-             $_etype721 = 0;
-             $xfer += $input->readListBegin($_etype721, $_size718);
-             for ($_i722 = 0; $_i722 < $_size718; ++$_i722)
 -            $_size717 = 0;
 -            $_etype720 = 0;
 -            $xfer += $input->readListBegin($_etype720, $_size717);
 -            for ($_i721 = 0; $_i721 < $_size717; ++$_i721)
++            $_size724 = 0;
++            $_etype727 = 0;
++            $xfer += $input->readListBegin($_etype727, $_size724);
++            for ($_i728 = 0; $_i728 < $_size724; ++$_i728)
              {
-               $elem723 = null;
-               $xfer += $input->readString($elem723);
-               $this->success []= $elem723;
 -              $elem722 = null;
 -              $xfer += $input->readString($elem722);
 -              $this->success []= $elem722;
++              $elem729 = null;
++              $xfer += $input->readString($elem729);
++              $this->success []= $elem729;
              }
              $xfer += $input->readListEnd();
            } else {
@@@ -17666,9 -17726,9 +17950,9 @@@
        {
          $output->writeListBegin(TType::STRING, count($this->success));
          {
-           foreach ($this->success as $iter724)
 -          foreach ($this->success as $iter723)
++          foreach ($this->success as $iter730)
            {
-             $xfer += $output->writeString($iter724);
 -            $xfer += $output->writeString($iter723);
++            $xfer += $output->writeString($iter730);
            }
          }
          $output->writeListEnd();
@@@ -18981,15 -19041,15 +19265,15 @@@ class ThriftHiveMetastore_add_partition
          case 1:
            if ($ftype == TType::LST) {
              $this->new_parts = array();
-             $_size725 = 0;
-             $_etype728 = 0;
-             $xfer += $input->readListBegin($_etype728, $_size725);
-             for ($_i729 = 0; $_i729 < $_size725; ++$_i729)
 -            $_size724 = 0;
 -            $_etype727 = 0;
 -            $xfer += $input->readListBegin($_etype727, $_size724);
 -            for ($_i728 = 0; $_i728 < $_size724; ++$_i728)
++            $_size731 = 0;
++            $_etype734 = 0;
++            $xfer += $input->readListBegin($_etype734, $_size731);
++            for ($_i735 = 0; $_i735 < $_size731; ++$_i735)
              {
-               $elem730 = null;
-               $elem730 = new \metastore\Partition();
-               $xfer += $elem730->read($input);
-               $this->new_parts []= $elem730;
 -              $elem729 = null;
 -              $elem729 = new \metastore\Partition();
 -              $xfer += $elem729->read($input);
 -              $this->new_parts []= $elem729;
++              $elem736 = null;
++              $elem736 = new \metastore\Partition();
++              $xfer += $elem736->read($input);
++              $this->new_parts []= $elem736;
              }
              $xfer += $input->readListEnd();
            } else {
@@@ -19017,9 -19077,9 +19301,9 @@@
        {
          $output->writeListBegin(TType::STRUCT, count($this->new_parts));
          {
-           foreach ($this->new_parts as $iter731)
 -          foreach ($this->new_parts as $iter730)
++          foreach ($this->new_parts as $iter737)
            {
-             $xfer += $iter731->write($output);
 -            $xfer += $iter730->write($output);
++            $xfer += $iter737->write($output);
            }
          }
          $output->writeListEnd();
@@@ -19234,15 -19294,15 +19518,15 @@@ class ThriftHiveMetastore_add_partition
          case 1:
            if ($ftype == TType::LST) {
              $this->new_parts = array();
-             $_size732 = 0;
-             $_etype735 = 0;
-             $xfer += $input->readListBegin($_etype735, $_size732);
-             for ($_i736 = 0; $_i736 < $_size732; ++$_i736)
 -            $_size731 = 0;
 -            $_etype734 = 0;
 -            $xfer += $input->readListBegin($_etype734, $_size731);
 -            for ($_i735 = 0; $_i735 < $_size731; ++$_i735)
++            $_size738 = 0;
++            $_etype741 = 0;
++            $xfer += $input->readListBegin($_etype741, $_size738);
++            for ($_i742 = 0; $_i742 < $_size738; ++$_i742)
              {
-               $elem737 = null;
-               $elem737 = new \metastore\PartitionSpec();
-               $xfer += $elem737->read($input);
-               $this->new_parts []= $elem737;
 -              $elem736 = null;
 -              $elem736 = new \metastore\PartitionSpec();
 -              $xfer += $elem736->read($input);
 -              $this->new_parts []= $elem736;
++              $elem743 = null;
++              $elem743 = new \metastore\PartitionSpec();
++              $xfer += $elem743->read($input);
++              $this->new_parts []= $elem743;
              }
              $xfer += $input->readListEnd();
            } else {
@@@ -19270,9 -19330,9 +19554,9 @@@
        {
          $output->writeListBegin(TType::STRUCT, count($this->new_parts));
          {
-           foreach ($this->new_parts as $iter738)
 -          foreach ($this->new_parts as $iter737)
++          foreach ($this->new_parts as $iter744)
            {
-             $xfer += $iter738->write($output);
 -            $xfer += $iter737->write($output);
++            $xfer += $iter744->write($output);
            }
          }
          $output->writeListEnd();
@@@ -19522,14 -19582,14 +19806,14 @@@ class ThriftHiveMetastore_append_partit
          case 3:
            if ($ftype == TType::LST) {
              $this->part_vals = array();
-             $_size739 = 0;
-             $_etype742 = 0;
-             $xfer += $input->readListBegin($_etype742, $_size739);
-             for ($_i743 = 0; $_i743 < $_size739; ++$_i743)
 -            $_size738 = 0;
 -            $_etype741 = 0;
 -            $xfer += $input->readListBegin($_etype741, $_size738);
 -            for ($_i742 = 0; $_i742 < $_size738; ++$_i742)
++            $_size745 = 0;
++            $_etype748 = 0;
++            $xfer += $input->readListBegin($_etype748, $_size745);
++            for ($_i749 = 0; $_i749 < $_size745; ++$_i749)
              {
-               $elem744 = null;
-               $xfer += $input->readString($elem744);
-               $this->part_vals []= $elem744;
 -              $elem743 = null;
 -              $xfer += $input->readString($elem743);
 -              $this->part_vals []= $elem743;
++              $elem750 = null;
++              $xfer += $input->readString($elem750);
++              $this->part_vals []= $elem750;
              }
              $xfer += $input->readListEnd();
            } else {
@@@ -19567,9 -19627,9 +19851,9 @@@
        {
          $output->writeListBegin(TType::STRING, count($this->part_vals));
          {
-           foreach ($this->part_vals as $iter745)
 -          foreach ($this->part_vals as $iter744)
++          foreach ($this->part_vals as $iter751)
            {
-             $xfer += $output->writeString($iter745);
 -            $xfer += $output->writeString($iter744);
++            $xfer += $output->writeString($iter751);
            }
          }
          $output->writeListEnd();
@@@ -20071,14 -20131,14 +20355,14 @@@ class ThriftHiveMetastore_append_partit
          case 3:
            if ($ftype == TType::LST) {
              $this->part_vals = array();
-             $_size746 = 0;
-             $_etype749 = 0;
-             $xfer += $input->readListBegin($_etype749, $_size746);
-             for ($_i750 = 0; $_i750 < $_size746; ++$_i750)
 -            $_size745 = 0;
 -            $_etype748 = 0;
 -            $xfer += $input->readListBegin($_etype748, $_size745);
 -            for ($_i749 = 0; $_i749 < $_size745; ++$_i749)
++            $_size752 = 0;
++            $_etype755 = 0;
++            $xfer += $input->readListBegin($_etype755, $_size752);
++            for ($_i756 = 0; $_i756 < $_size752; ++$_i756)
              {
-               $elem751 = null;
-               $xfer += $input->readString($elem751);
-               $this->part_vals []= $elem751;
 -              $elem750 = null;
 -              $xfer += $input->readString($elem750);
 -              $this->part_vals []= $elem750;
++              $elem757 = null;
++              $xfer += $input->readString($elem757);
++              $this->part_vals []= $elem757;
              }
              $xfer += $input->readListEnd();
            } else {
@@@ -20124,9 -20184,9 +20408,9 @@@
        {
          $output->writeListBegin(TType::STRING, count($this->part_vals));
          {
-           foreach ($this->part_vals as $iter752)
 -          foreach ($this->part_vals as $iter751)
++          foreach ($this->part_vals as $iter758)
            {
-             $xfer += $output->writeString($iter752);
 -            $xfer += $output->writeString($iter751);
++            $xfer += $output->writeString($iter758);
            }
          }
          $output->writeListEnd();
@@@ -20980,14 -21040,14 +21264,14 @@@ class ThriftHiveMetastore_drop_partitio
          case 3:
            if ($ftype == TType::LST) {
              $this->part_vals = array();
-             $_size753 = 0;
-             $_etype756 = 0;
-             $xfer += $input->readListBegin($_etype756, $_size753);
-             for ($_i757 = 0; $_i757 < $_size753; ++$_i757)
 -            $_size752 = 0;
 -            $_etype755 = 0;
 -            $xfer += $input->readListBegin($_etype755, $_size752);
 -            for ($_i756 = 0; $_i756 < $_size752; ++$_i756)
++            $_size759 = 0;
++            $_etype762 = 0;
++            $xfer += $input->readListBegin($_etype762, $_size759);
++            for ($_i763 = 0; $_i763 < $_size759; ++$_i763)
              {
-               $elem758 = null;
-               $xfer += $input->readString($elem758);
-               $this->part_vals []= $elem758;
 -              $elem757 = null;
 -              $xfer += $input->readString($elem757);
 -              $this->part_vals []= $elem757;
++              $elem764 = null;
++              $xfer += $input->readString($elem764);
++              $this->part_vals []= $elem764;
              }
              $xfer += $input->readListEnd();
            } else {
@@@ -21032,9 -21092,9 +21316,9 @@@
        {
          $output->writeListBegin(TType::STRING, count($this->part_vals));
          {
-           foreach ($this->part_vals as $iter759)
 -          foreach ($this->part_vals as $iter758)
++          foreach ($this->part_vals as $iter765)
            {
-             $xfer += $output->writeString($iter759);
 -            $xfer += $output->writeString($iter758);
++            $xfer += $output->writeString($iter765);
            }
          }
          $output->writeListEnd();
@@@ -21287,14 -21347,14 +21571,14 @@@ class ThriftHiveMetastore_drop_partitio
          case 3:
            if ($ftype == TType::LST) {
              $this->part_vals = array();
-             $_size760 = 0;
-             $_etype763 = 0;
-             $xfer += $input->readListBegin($_etype763, $_size760);
-             for ($_i764 = 0; $_i764 < $_size760; ++$_i764)
 -            $_size759 = 0;
 -            $_etype762 = 0;
 -            $xfer += $input->readListBegin($_etype762, $_size759);
 -            for ($_i763 = 0; $_i763 < $_size759; ++$_i763)
++            $_size766 = 0;
++            $_etype769 = 0;
++            $xfer += $input->readListBegin($_etype769, $_size766);
++            for ($_i770 = 0; $_i770 < $_size766; ++$_i770)
              {
-               $elem765 = null;
-               $xfer += $input->readString($elem765);
-               $this->part_vals []= $elem765;
 -              $elem764 = null;
 -              $xfer += $input->readString($elem764);
 -              $this->part_vals []= $elem764;
++              $elem771 = null;
++              $xfer += $input->readString($elem771);
++              $this->part_vals []= $elem771;
              }
              $xfer += $input->readListEnd();
            } else {
@@@ -21347,9 -21407,9 +21631,9 @@@
        {
          $output->writeListBegin(TType::STRING, count($this->part_vals));
          {
-           foreach ($this->part_vals as $iter766)
 -          foreach ($this->part_vals as $iter765)
++          foreach ($this->part_vals as $iter772)
            {
-             $xfer += $output->writeString($iter766);
 -            $xfer += $output->writeString($iter765);
++            $xfer += $output->writeString($iter772);
            }
          }
          $output->writeListEnd();
@@@ -22363,14 -22423,14 +22647,14 @@@ class ThriftHiveMetastore_get_partition
          case 3:
            if ($ftype == TType::LST) {
              $this->part_vals = array();
-             $_size767 = 0;
-             $_etype770 = 0;
-             $xfer += $input->readListBegin($_etype770, $_size767);
-             for ($_i771 = 0; $_i771 < $_size767; ++$_i771)
 -            $_size766 = 0;
 -            $_etype769 = 0;
 -            $xfer += $input->readListBegin($_etype769, $_size766);
 -            for ($_i770 = 0; $_i770 < $_size766; ++$_i770)
++            $_size773 = 0;
++            $_etype776 = 0;
++            $xfer += $input->readListBegin($_etype776, $_size773);
++            for ($_i777 = 0; $_i777 < $_size773; ++$_i777)
              {
-               $elem772 = null;
-               $xfer += $input->readString($elem772);
-               $this->part_vals []= $elem772;
 -              $elem771 = null;
 -              $xfer += $input->readString($elem771);
 -              $this->part_vals []= $elem771;
++              $elem778 = null;
++              $xfer += $input->readString($elem778);
++              $this->part_vals []= $elem778;
              }
              $xfer += $input->readListEnd();
            } else {
@@@ -22408,9 -22468,9 +22692,9 @@@
        {
          $output->writeListBegin(TType::STRING, count($this->part_vals));
          {
-           foreach ($this->part_vals as $iter773)
 -          foreach ($this->part_vals as $iter772)
++          foreach ($this->part_vals as $iter779)
            {
-             $xfer += $output->writeString($iter773);
 -            $xfer += $output->writeString($iter772);
++            $xfer += $output->writeString($iter779);
            }
          }
          $output->writeListEnd();
@@@ -22652,17 -22712,17 +22936,17 @@@ class ThriftHiveMetastore_exchange_part
          case 1:
            if ($ftype == TType::MAP) {
              $this->partitionSpecs = array();
-             $_size774 = 0;
-             $_ktype775 = 0;
-             $_vtype776 = 0;
-             $xfer += $input->readMapBegin($_ktype775, $_vtype776, $_size774);
-             for ($_i778 = 0; $_i778 < $_size774; ++$_i778)
 -            $_size773 = 0;
 -            $_ktype774 = 0;
 -            $_vtype775 = 0;
 -            $xfer += $input->readMapBegin($_ktype774, $_vtype775, $_size773);
 -            for ($_i777 = 0; $_i777 < $_size773; ++$_i777)
++            $_size780 = 0;
++            $_ktype781 = 0;
++            $_vtype782 = 0;
++            $xfer += $input->readMapBegin($_ktype781, $_vtype782, $_size780);
++            for ($_i784 = 0; $_i784 < $_size780; ++$_i784)
              {
-               $key779 = '';
-               $val780 = '';
-               $xfer += $input->readString($key779);
-               $xfer += $input->readString($val780);
-               $this->partitionSpecs[$key779] = $val780;
 -              $key778 = '';
 -              $val779 = '';
 -              $xfer += $input->readString($key778);
 -              $xfer += $input->readString($val779);
 -              $this->partitionSpecs[$key778] = $val779;
++              $key785 = '';
++              $val786 = '';
++              $xfer += $input->readString($key785);
++              $xfer += $input->readString($val786);
++              $this->partitionSpecs[$key785] = $val786;
              }
              $xfer += $input->readMapEnd();
            } else {
@@@ -22718,10 -22778,10 +23002,10 @@@
        {
          $output->writeMapBegin(TType::STRING, TType::STRING, count($this->partitionSpecs));
          {
-           foreach ($this->partitionSpecs as $kiter781 => $viter782)
 -          foreach ($this->partitionSpecs as $kiter780 => $viter781)
++          foreach ($this->partitionSpecs as $kiter787 => $viter788)
            {
-             $xfer += $output->writeString($kiter781);
-             $xfer += $output->writeString($viter782);
 -            $xfer += $output->writeString($kiter780);
 -            $xfer += $output->writeString($viter781);
++            $xfer += $output->writeString($kiter787);
++            $xfer += $output->writeString($viter788);
            }
          }
          $output->writeMapEnd();
@@@ -23033,17 -23093,17 +23317,17 @@@ class ThriftHiveMetastore_exchange_part
          case 1:
            if ($ftype == TType::MAP) {
              $this->partitionSpecs = array();
-             $_size783 = 0;
-             $_ktype784 = 0;
-             $_vtype785 = 0;
-             $xfer += $input->readMapBegin($_ktype784, $_vtype785, $_size783);
-             for ($_i787 = 0; $_i787 < $_size783; ++$_i787)
 -            $_size782 = 0;
 -            $_ktype783 = 0;
 -            $_vtype784 = 0;
 -            $xfer += $input->readMapBegin($_ktype783, $_vtype784, $_size782);
 -            for ($_i786 = 0; $_i786 < $_size782; ++$_i786)
++            $_size789 = 0;
++            $_ktype790 = 0;
++            $_vtype791 = 0;
++            $xfer += $input->readMapBegin($_ktype790, $_vtype791, $_size789);
++            for ($_i793 = 0; $_i793 < $_size789; ++$_i793)
              {
-               $key788 = '';
-               $val789 = '';
-               $xfer += $input->readString($key788);
-               $xfer += $input->readString($val789);
-               $this->partitionSpecs[$key788] = $val789;
 -              $key787 = '';
 -              $val788 = '';
 -              $xfer += $input->readString($key787);
 -              $xfer += $input->readString($val788);
 -              $this->partitionSpecs[$key787] = $val788;
++              $key794 = '';
++              $val795 = '';
++              $xfer += $input->readString($key794);
++              $xfer += $input->readString($val795);
++              $this->partitionSpecs[$key794] = $val795;
              }
              $xfer += $input->readMapEnd();
            } else {
@@@ -23099,10 -23159,10 +23383,10 @@@
        {
          $output->writeMapBegin(TType::STRING, TType::STRING, count($this->partitionSpecs));
          {
-           foreach ($this->partitionSpecs as $kiter790 => $viter791)
 -          foreach ($this->partitionSpecs as $kiter789 => $viter790)
++          foreach ($this->partitionSpecs as $kiter796 => $viter797)
            {
-             $xfer += $output->writeString($kiter790);
-             $xfer += $output->writeString($viter791);
 -            $xfer += $output->writeString($kiter789);
 -            $xfer += $output->writeString($viter790);
++            $xfer += $output->writeString($kiter796);
++            $xfer += $output->writeString($viter797);
            }
          }
          $output->writeMapEnd();
@@@ -23235,15 -23295,15 +23519,15 @@@ class ThriftHiveMetastore_exchange_part
          case 0:
            if ($ftype == TType::LST) {
              $this->success = array();
-             $_size792 = 0;
-             $_etype795 = 0;
-             $xfer += $input->readListBegin($_etype795, $_size792);
-             for ($_i796 = 0; $_i796 < $_size792; ++$_i796)
 -            $_size791 = 0;
 -            $_etype794 = 0;
 -            $xfer += $input->readListBegin($_etype794, $_size791);
 -            for ($_i795 = 0; $_i795 < $_size791; ++$_i795)
++            $_size798 = 0;
++            $_etype801 = 0;
++            $xfer += $input->readListBegin($_etype801, $_size798);
++            for ($_i802 = 0; $_i802 < $_size798; ++$_i802)
              {
-               $elem797 = null;
-               $elem797 = new \metastore\Partition();
-               $xfer += $elem797->read($input);
-               $this->success []= $elem797;
 -              $elem796 = null;
 -              $elem796 = new \metastore\Partition();
 -              $xfer += $elem796->read($input);
 -              $this->success []= $elem796;
++              $elem803 = null;
++              $elem803 = new \metastore\Partition();
++              $xfer += $elem803->read($input);
++              $this->success []= $elem803;
              }
              $xfer += $input->readListEnd();
            } else {
@@@ -23303,9 -23363,9 +23587,9 @@@
        {
          $output->writeListBegin(TType::STRUCT, count($this->success));
          {
-           foreach ($this->success as $iter798)
 -          foreach ($this->success as $iter797)
++          foreach ($this->success as $iter804)
            {
-             $xfer += $iter798->write($output);
 -            $xfer += $iter797->write($output);
++            $xfer += $iter804->write($output);
            }
          }
          $output->writeListEnd();
@@@ -23451,14 -23511,14 +23735,14 @@@ class ThriftHiveMetastore_get_partition
          case 3:
            if ($ftype == TType::LST) {
              $this->part_vals = array();
-             $_size799 = 0;
-             $_etype802 = 0;
-             $xfer += $input->readListBegin($_etype802, $_size799);
-             for ($_i803 = 0; $_i803 < $_size799; ++$_i803)
 -            $_size798 = 0;
 -            $_etype801 = 0;
 -            $xfer += $input->readListBegin($_etype801, $_size798);
 -            for ($_i802 = 0; $_i802 < $_size798; ++$_i802)
++            $_size805 = 0;
++            $_etype808 = 0;
++            $xfer += $input->readListBegin($_etype808, $_size805);
++            for ($_i809 = 0; $_i809 < $_size805; ++$_i809)
              {
-               $elem804 = null;
-               $xfer += $input->readString($elem804);
-               $this->part_vals []= $elem804;
 -              $elem803 = null;
 -              $xfer += $input->readString($elem803);
 -              $this->part_vals []= $elem803;
++              $elem810 = null;
++              $xfer += $input->readString($elem810);
++              $this->part_vals []= $elem810;
              }
              $xfer += $input->readListEnd();
            } else {
@@@ -23475,14 -23535,14 +23759,14 @@@
          case 5:
            if ($ftype == TType::LST) {
              $this->group_names = array();
-             $_size805 = 0;
-             $_etype808 = 0;
-             $xfer += $input->readListBegin($_etype808, $_size805);
-             for ($_i809 = 0; $_i809 < $_size805; ++$_i809)
 -            $_size804 = 0;
 -            $_etype807 = 0;
 -            $xfer += $input->readListBegin($_etype807, $_size804);
 -            for ($_i808 = 0; $_i808 < $_size804; ++$_i808)
++            $_size811 = 0;
++            $_etype814 = 0;
++            $xfer += $input->readListBegin($_etype814, $_size811);
++            for ($_i815 = 0; $_i815 < $_size811; ++$_i815)
              {
-               $elem810 = null;
-               $xfer += $input->readString($elem810);
-               $this->group_names []= $elem810;
 -              $elem809 = null;
 -              $xfer += $input->readString($elem809);
 -              $this->group_names []= $elem809;
++              $elem816 = null;
++              $xfer += $input->readString($elem816);
++              $this->group_names []= $elem816;
              }
              $xfer += $input->readListEnd();
            } else {
@@@ -23520,9 -23580,9 +23804,9 @@@
        {
          $output->writeListBegin(TType::STRING, count($this->part_vals));
          {
-           foreach ($this->part_vals as $iter811)
 -          foreach ($this->part_vals as $iter810)
++          foreach ($this->part_vals as $iter817)
            {
-             $xfer += $output->writeString($iter811);
 -            $xfer += $output->writeString($iter810);
++            $xfer += $output->writeString($iter817);
            }
          }
          $output->writeListEnd();
@@@ -23542,9 -23602,9 +23826,9 @@@
        {
          $output->writeListBegin(TType::STRING, count($this->group_names));
          {
-           foreach ($this->group_names as $iter812)
 -          foreach ($this->group_names as $iter811)
++          foreach ($this->group_names as $iter818)
            {
-             $xfer += $output->writeString($iter812);
 -            $xfer += $output->writeString($iter811);
++            $xfer += $output->writeString($iter818);
            }
          }
          $output->writeListEnd();
@@@ -24135,15 -24195,15 +24419,15 @@@ class ThriftHiveMetastore_get_partition
          case 0:
            if ($ftype == TType::LST) {
              $this->success = array();
-             $_size813 = 0;
-             $_etype816 = 0;
-             $xfer += $input->readListBegin($_etype816, $_size813);
-             for ($_i817 = 0; $_i817 < $_size813; ++$_i817)
 -            $_size812 = 0;
 -            $_etype815 = 0;
 -            $xfer += $input->readListBegin($_etype815, $_size812);
 -            for ($_i816 = 0; $_i816 < $_size812; ++$_i816)
++            $_size819 = 0;
++            $_etype822 = 0;
++            $xfer += $input->readListBegin($_etype822, $_size819);
++            for ($_i823 = 0; $_i823 < $_size819; ++$_i823)
              {
-               $elem818 = null;
-               $elem818 = new \metastore\Partition();
-               $xfer += $elem818->read($input);
-               $this->success []= $elem818;
 -              $elem817 = null;
 -              $elem817 = new \metastore\Partition();
 -              $xfer += $elem817->read($input);
 -              $this->success []= $elem817;
++              $elem824 = null;
++              $elem824 = new \metastore\Partition();
++              $xfer += $elem824->read($input);
++              $this->success []= $elem824;
              }
              $xfer += $input->readListEnd();
            } else {
@@@ -24187,9 -24247,9 +24471,9 @@@
        {
          $output->writeListBegin(TType::STRUCT, count($this->success));
          {
-           foreach ($this->success as $iter819)
 -          foreach ($this->success as $iter818)
++          foreach ($this->success as $iter825)
            {
-             $xfer += $iter819->write($output);
 -            $xfer += $iter818->write($output);
++            $xfer += $iter825->write($output);
            }
          }
          $output->writeListEnd();
@@@ -24335,14 -24395,14 +24619,14 @@@ class ThriftHiveMetastore_get_partition
          case 5:
            if ($ftype == TType::LST) {
              $this->group_names = array();
-             $_size820 = 0;
-             $_etype823 = 0;
-             $xfer += $input->readListBegin($_etype823, $_size820);
-             for ($_i824 = 0; $_i824 < $_size820; ++$_i824)
 -            $_size819 = 0;
 -            $_etype822 = 0;
 -            $xfer += $input->readListBegin($_etype822, $_size819);
 -            for ($_i823 = 0; $_i823 < $_size819; ++$_i823)
++            $_size826 = 0;
++            $_etype829 = 0;
++            $xfer += $input->readListBegin($_etype829, $_size826);
++            for ($_i830 = 0; $_i830 < $_size826; ++$_i830)
              {
-               $elem825 = null;
-               $xfer += $input->readString($elem825);
-               $this->group_names []= $elem825;
 -              $elem824 = null;
 -              $xfer += $input->readString($elem824);
 -              $this->group_names []= $elem824;
++              $elem831 = null;
++              $xfer += $input->readString($elem831);
++              $this->group_names []= $elem831;
              }
              $xfer += $input->readListEnd();
            } else {
@@@ -24390,9 -24450,9 +24674,9 @@@
        {
          $output->writeListBegin(TType::STRING, count($this->group_names));
          {
-           foreach ($this->group_names as $iter826)
 -          foreach ($this->group_names as $iter825)
++          foreach ($this->group_names as $iter832)
            {
-             $xfer += $output->writeString($iter826);
 -            $xfer += $output->writeString($iter825);
++            $xfer += $output->writeString($iter832);
            }
          }
          $output->writeListEnd();
@@@ -24481,15 -24541,15 +24765,15 @@@ class ThriftHiveMetastore_get_partition
          case 0:
            if ($ftype == TType::LST) {
              $this->success = array();
-             $_size827 = 0;
-             $_etype830 = 0;
-             $xfer += $input->readListBegin($_etype830, $_size827);
-             for ($_i831 = 0; $_i831 < $_size827; ++$_i831)
 -            $_size826 = 0;
 -            $_etype829 = 0;
 -            $xfer += $input->readListBegin($_etype829, $_size826);
 -            for ($_i830 = 0; $_i830 < $_size826; ++$_i830)
++            $_size833 = 0;
++            $_etype836 = 0;
++            $xfer += $input->readListBegin($_etype836, $_size833);
++            for ($_i837 = 0; $_i837 < $_size833; ++$_i837)
              {
-               $elem832 = null;
-               $elem832 = new \metastore\Partition();
-               $xfer += $elem832->read($input);
-               $this->success []= $elem832;
 -              $elem831 = null;
 -              $elem831 = new \metastore\Partition();
 -              $xfer += $elem831->read($input);
 -              $this->success []= $elem831;
++              $elem838 = null;
++              $elem838 = new \metastore\Partition();
++              $xfer += $elem838->read($input);
++              $this->success []= $elem838;
              }
              $xfer += $input->readListEnd();
            } else {
@@@ -24533,9 -24593,9 +24817,9 @@@
        {
          $output->writeListBegin(TType::STRUCT, count($this->success));
          {
-           foreach ($this->success as $iter833)
 -          foreach ($this->success as $iter832)
++          foreach ($this->success as $iter839)
            {
-             $xfer += $iter833->write($output);
 -            $xfer += $iter832->write($output);
++            $xfer += $iter839->write($output);
            }
          }
          $output->writeListEnd();
@@@ -24755,15 -24815,15 +25039,15 @@@ class ThriftHiveMetastore_get_partition
          case 0:
            if ($ftype == TType::LST) {
              $this->success = array();
-             $_size834 = 0;
-             $_etype837 = 0;
-             $xfer += $input->readListBegin($_etype837, $_size834);
-             for ($_i838 = 0; $_i838 < $_size834; ++$_i838)
 -            $_size833 = 0;
 -            $_etype836 = 0;
 -            $xfer += $input->readListBegin($_etype836, $_size833);
 -            for ($_i837 = 0; $_i837 < $_size833; ++$_i837)
++            $_size840 = 0;
++            $_etype843 = 0;
++            $xfer += $input->readListBegin($_etype843, $_size840);
++            for ($_i844 = 0; $_i844 < $_size840; ++$_i844)
              {
-               $elem839 = null;
-               $elem839 = new \metastore\PartitionSpec();
-               $xfer += $elem839->read($input);
-               $this->success []= $elem839;
 -              $elem838 = null;
 -              $elem838 = new \metastore\PartitionSpec();
 -              $xfer += $elem838->read($input);
 -              $this->success []= $elem838;
++              $elem845 = null;
++              $elem845 = new \metastore\PartitionSpec();
++              $xfer += $elem845->read($input);
++              $this->success []= $elem845;
              }
              $xfer += $input->readListEnd();
            } else {
@@@ -24807,9 -24867,9 +25091,9 @@@
        {
          $output->writeListBegin(TType::STRUCT, count($this->success));
          {
-           foreach ($this->success as $iter840)
 -          foreach ($this->success as $iter839)
++          foreach ($this->success as $iter846)
            {
-             $xfer += $iter840->write($output);
 -            $xfer += $iter839->write($output);
++            $xfer += $iter846->write($output);
            }
          }
          $output->writeListEnd();
@@@ -25016,14 -25076,14 +25300,14 @@@ class ThriftHiveMetastore_get_partition
          case 0:
            if ($ftype == TType::LST) {
              $this->success = array();
-             $_size841 = 0;
-             $_etype844 = 0;
-             $xfer += $input->readListBegin($_etype844, $_size841);
-             for ($_i845 = 0; $_i845 < $_size841; ++$_i845)
 -            $_size840 = 0;
 -            $_etype843 = 0;
 -            $xfer += $input->readListBegin($_etype843, $_size840);
 -            for ($_i844 = 0; $_i844 < $_size840; ++$_i844)
++            $_size847 = 0;
++            $_etype850 = 0;
++            $xfer += $input->readListBegin($_etype850, $_size847);
++            for ($_i851 = 0; $_i851 < $_size847; ++$_i851)
              {
-               $elem846 = null;
-               $xfer += $input->readString($elem846);
-               $this->success []= $elem846;
 -              $elem845 = null;
 -              $xfer += $input->readString($elem845);
 -              $this->success []= $elem845;
++              $elem852 = null;
++              $xfer += $input->readString($elem852);
++              $this->success []= $elem852;
              }
              $xfer += $input->readListEnd();
            } else {
@@@ -25059,9 -25119,9 +25343,9 @@@
        {
          $output->writeListBegin(TType::STRING, count($this->success));
          {
-           foreach ($this->success as $iter847)
 -          foreach ($this->success as $iter846)
++          foreach ($this->success as $iter853)
            {
-             $xfer += $output->writeString($iter847);
 -            $xfer += $output->writeString($iter846);
++            $xfer += $output->writeString($iter853);
            }
          }
          $output->writeListEnd();
@@@ -25177,14 -25237,14 +25461,14 @@@ class ThriftHiveMetastore_get_partition
          case 3:
            if ($ftype == TType::LST) {
              $this->part_vals = array();
-             $_size848 = 0;
-             $_etype851 = 0;
-             $xfer += $input->readListBegin($_etype851, $_size848);
-             for ($_i852 = 0; $_i852 < $_size848; ++$_i852)
 -            $_size847 = 0;
 -            $_etype850 = 0;
 -            $xfer += $input->readListBegin($_etype850, $_size847);
 -            for ($_i851 = 0; $_i851 < $_size847; ++$_i851)
++            $_size854 = 0;
++            $_etype857 = 0;
++            $xfer += $input->readListBegin($_etype857, $_size854);
++            for ($_i858 = 0; $_i858 < $_size854; ++$_i858)
              {
-               $elem853 = null;
-               $xfer += $input->readString($elem853);
-               $this->part_vals []= $elem853;
 -              $elem852 = null;
 -              $xfer += $input->readString($elem852);
 -              $this->part_vals []= $elem852;
++              $elem859 = null;
++              $xfer += $input->readString($elem859);
++              $this->part_vals []= $elem859;
              }
              $xfer += $input->readListEnd();
            } else {
@@@ -25229,9 -25289,9 +25513,9 @@@
        {
          $output->writeListBegin(TType::STRING, count($this->part_vals));
          {
-           foreach ($this->part_vals as $iter854)
 -          foreach ($this->part_vals as $iter853)
++          foreach ($this->part_vals as $iter860)
            {
-             $xfer += $output->writeString($iter854);
 -            $xfer += $output->writeString($iter853);
++            $xfer += $output->writeString($iter860);
            }
          }
          $output->writeListEnd();
@@@ -25325,15 -25385,15 +25609,15 @@@ class ThriftHiveMetastore_get_partition
          case 0:
            if ($ftype == TType::LST) {
              $this->success = array();
-             $_size855 = 0;
-             $_etype858 = 0;
-             $xfer += $input->readListBegin($_etype858, $_size855);
-             for ($_i859 = 0; $_i859 < $_size855; ++$_i859)
 -            $_size854 = 0;
 -            $_etype857 = 0;
 -            $xfer += $input->readListBegin($_etype857, $_size854);
 -            for ($_i858 = 0; $_i858 < $_size854; ++$_i858)
++            $_size861 = 0;
++            $_etype864 = 0;
++            $xfer += $input->readListBegin($_etype864, $_size861);
++            for ($_i865 = 0; $_i865 < $_size861; ++$_i865)
              {
-               $elem860 = null;
-               $elem860 = new \metastore\Partition();
-               $xfer += $elem860->read($input);
-               $this->success []= $elem860;
 -              $elem859 = null;
 -              $elem859 = new \metastore\Partition();
 -              $xfer += $elem859->read($input);
 -              $this->success []= $elem859;
++              $elem866 = null;
++              $elem866 = new \metastore\Partition();
++              $xfer += $elem866->read($input);
++              $this->success []= $elem866;
              }
              $xfer += $input->readListEnd();
            } else {
@@@ -25377,9 -25437,9 +25661,9 @@@
        {
          $output->writeListBegin(TType::STRUCT, count($this->success));
          {
-           foreach ($this->success as $iter861)
 -          foreach ($this->success as $iter860)
++          foreach ($this->success as $iter867)
            {
-             $xfer += $iter861->write($output);
 -            $xfer += $iter860->write($output);
++            $xfer += $iter867->write($output);
            }
          }
          $output->writeListEnd();
@@@ -25526,14 -25586,14 +25810,14 @@@ class ThriftHiveMetastore_get_partition
          case 3:
            if ($ftype == TType::LST) {
              $this->part_vals = array();
-             $_size862 = 0;
-             $_etype865 = 0;
-             $xfer += $input->readListBegin($_etype865, $_size862);
-             for ($_i866 = 0; $_i866 < $_size862; ++$_i866)
 -            $_size861 = 0;
 -            $_etype864 = 0;
 -            $xfer += $input->readListBegin($_etype864, $_size861);
 -            for ($_i865 = 0; $_i865 < $_size861; ++$_i865)
++            $_size868 = 0;
++            $_etype871 = 0;
++            $xfer += $input->readListBegin($_etype871, $_size868);
++            for ($_i872 = 0; $_i872 < $_size868; ++$_i872)
              {
-               $elem867 = null;
-               $xfer += $input->readString($elem867);
-               $this->part_vals []= $elem867;
 -              $elem866 = null;
 -              $xfer += $input->readString($elem866);
 -              $this->part_vals []= $elem866;
++              $elem873 = null;
++              $xfer += $input->readString($elem873);
++              $this->part_vals []= $elem873;
              }
              $xfer += $input->readListEnd();
            } else {
@@@ -25557,14 -25617,14 +25841,14 @@@
          case 6:
            if ($ftype == TType::LST) {
              $this->group_names = array();
-             $_size868 = 0;
-             $_etype871 = 0;
-             $xfer += $input->readListBegin($_etype871, $_size868);
-             for ($_i872 = 0; $_i872 < $_size868; ++$_i872)
 -            $_size867 = 0;
 -            $_etype870 = 0;
 -            $xfer += $input->readListBegin($_etype870, $_size867);
 -            for ($_i871 = 0; $_i871 < $_size867; ++$_i871)
++            $_size874 = 0;
++            $_etype877 = 0;
++            $xfer += $input->readListBegin($_etype877, $_size874);
++            for ($_i878 = 0; $_i878 < $_size874; ++$_i878)
              {
-               $elem873 = null;
-               $xfer += $input->readString($elem873);
-               $this->group_names []= $elem873;
 -              $elem872 = null;
 -              $xfer += $input->readString($elem872);
 -              $this->group_names []= $elem872;
++              $elem879 = null;
++              $xfer += $input->readString($elem879);
++              $this->group_names []= $elem879;
              }
              $xfer += $input->readListEnd();
            } else {
@@@ -25602,9 -25662,9 +25886,9 @@@
        {
          $output->writeListBegin(TType::STRING, count($this->part_vals));
          {
-           foreach ($this->part_vals as $iter874)
 -          foreach ($this->part_vals as $iter873)
++          foreach ($this->part_vals as $iter880)
            {
-             $xfer += $output->writeString($iter874);
 -            $xfer += $output->writeString($iter873);
++            $xfer += $output->writeString($iter880);
            }
          }
          $output->writeListEnd();
@@@ -25629,9 -25689,9 +25913,9 @@@
        {
          $output->writeListBegin(TType::STRING, count($this->group_names));
          {
-           foreach ($this->group_names as $iter875)
 -          foreach ($this->group_names as $iter874)
++          foreach ($this->group_names as $iter881)
            {
-             $xfer += $output->writeString($iter875);
 -            $xfer += $output->writeString($iter874);
++            $xfer += $output->writeString($iter881);
            }
          }
          $output->writeListEnd();
@@@ -25720,15 -25780,15 +26004,15 @@@ class ThriftHiveMetastore_get_partition
          case 0:
            if ($ftype == TType::LST) {
              $this->success = array();
-             $_size876 = 0;
-             $_etype879 = 0;
-             $xfer += $input->readListBegin($_etype879, $_size876);
-             for ($_i880 = 0; $_i880 < $_size876; ++$_i880)
 -            $_size875 = 0;
 -            $_etype878 = 0;
 -            $xfer += $input->readListBegin($_etype878, $_size875);
 -            for ($_i879 = 0; $_i879 < $_size875; ++$_i879)
++            $_size882 = 0;
++            $_etype885 = 0;
++            $xfer += $input->readListBegin($_etype885, $_size882);
++            for ($_i886 = 0; $_i886 < $_size882; ++$_i886)
              {
-               $elem881 = null;
-               $elem881 = new \metastore\Partition();
-               $xfer += $elem881->read($input);
-               $this->success []= $elem881;
 -              $elem880 = null;
 -              $elem880 = new \metastore\Partition();
 -              $xfer += $elem880->read($input);
 -              $this->success []= $elem880;
++              $elem887 = null;
++              $elem887 = new \metastore\Partition();
++              $xfer += $elem887->read($input);
++              $this->success []= $elem887;
              }
              $xfer += $input->readListEnd();
            } else {
@@@ -25772,9 -25832,9 +26056,9 @@@
        {
          $output->writeListBegin(TType::STRUCT, count($this->success));
          {
-           foreach ($this->success as $iter882)
 -          foreach ($this->success as $iter881)
++          foreach ($this->success as $iter888)
            {
-             $xfer += $iter882->write($output);
 -            $xfer += $iter881->write($output);
++            $xfer += $iter888->write($output);
            }
          }
          $output->writeListEnd();
@@@ -25895,14 -25955,14 +26179,14 @@@ class ThriftHiveMetastore_get_partition
          case 3:
            if ($ftype == TType::LST) {
              $this->part_vals = array();
-             $_size883 = 0;
-             $_etype886 = 0;
-             $xfer += $input->readListBegin($_etype886, $_size883);
-             for ($_i887 = 0; $_i887 < $_size883; ++$_i887)
 -            $_size882 = 0;
 -            $_etype885 = 0;
 -            $xfer += $input->readListBegin($_etype885, $_size882);
 -            for ($_i886 = 0; $_i886 < $_size882; ++$_i886)
++            $_size889 = 0;
++            $_etype892 = 0;
++            $xfer += $input->readListBegin($_etype892, $_size889);
++            for ($_i893 = 0; $_i893 < $_size889; ++$_i893)
              {
-               $elem888 = null;
-               $xfer += $input->readString($elem888);
-               $this->part_vals []= $elem888;
 -              $elem887 = null;
 -              $xfer += $input->readString($elem887);
 -              $this->part_vals []= $elem887;
++              $elem894 = null;
++              $xfer += $input->readString($elem894);
++              $this->part_vals []= $elem894;
              }
              $xfer += $input->readListEnd();
            } else {
@@@ -25947,9 -26007,9 +26231,9 @@@
        {
          $output->writeListBegin(TType::STRING, count($this->part_vals));
          {
-           foreach ($this->part_vals as $iter889)
 -          foreach ($this->part_vals as $iter888)
++          foreach ($this->part_vals as $iter895)
            {
-             $xfer += $output->writeString($iter889);
 -            $xfer += $output->writeString($iter888);
++            $xfer += $output->writeString($iter895);
            }
          }
          $output->writeListEnd();
@@@ -26042,14 -26102,14 +26326,14 @@@ class ThriftHiveMetastore_get_partition
          case 0:
            if ($ftype == TType::LST) {
              $this->success = array();
-             $_size890 = 0;
-             $_etype893 = 0;
-             $xfer += $input->readListBegin($_etype893, $_size890);
-             for ($_i894 = 0; $_i894 < $_size890; ++$_i894)
 -            $_size889 = 0;
 -            $_etype892 = 0;
 -            $xfer += $input->readListBegin($_etype892, $_size889);
 -            for ($_i893 = 0; $_i893 < $_size889; ++$_i893)
++            $_size896 = 0;
++            $_etype899 = 0;
++            $xfer += $input->readListBegin($_etype899, $_size896);
++            for ($_i900 = 0; $_i900 < $_size896; ++$_i900)
              {
-               $elem895 = null;
-               $xfer += $input->readString($elem895);
-               $this->success []= $elem895;
 -              $elem894 = null;
 -              $xfer += $input->readString($elem894);
 -              $this->success []= $elem894;
++              $elem901 = null;
++              $xfer += $input->readString($elem901);
++              $this->success []= $elem901;
              }
              $xfer += $input->readListEnd();
            } else {
@@@ -26093,9 -26153,9 +26377,9 @@@
        {
          $output->writeListBegin(TType::STRING, count($this->success));
          {
-           foreach ($this->success as $iter896)
 -          foreach ($this->success as $iter895)
++          foreach ($this->success as $iter902)
            {
-             $xfer += $output->writeString($iter896);
 -            $xfer += $output->writeString($iter895);
++            $xfer += $output->writeString($iter902);
            }
          }
          $output->writeListEnd();
@@@ -26338,15 -26398,15 +26622,15 @@@ class ThriftHiveMetastore_get_partition
          case 0:
            if ($ftype == TType::LST) {
              $this->success = array();
-             $_size897 = 0;
-             $_etype900 = 0;
-             $xfer += $input->readListBegin($_etype900, $_size897);
-             for ($_i901 = 0; $_i901 < $_size897; ++$_i901)
 -            $_size896 = 0;
 -            $_etype899 = 0;
 -            $xfer += $input->readListBegin($_etype899, $_size896);
 -            for ($_i900 = 0; $_i900 < $_size896; ++$_i900)
++            $_size903 = 0;
++            $_etype906 = 0;
++            $xfer += $input->readListBegin($_etype906, $_size903);
++            for ($_i907 = 0; $_i907 < $_size903; ++$_i907)
              {
-               $elem902 = null;
-               $elem902 = new \metastore\Partition();
-               $xfer += $elem902->read($input);
-               $this->success []= $elem902;
 -              $elem901 = null;
 -              $elem901 = new \metastore\Partition();
 -              $xfer += $elem901->read($input);
 -              $this->success []= $elem901;
++              $elem908 = null;
++              $elem908 = new \metastore\Partition();
++              $xfer += $elem908->read($input);
++              $this->success []= $elem908;
              }
              $xfer += $input->readListEnd();
            } else {
@@@ -26390,9 -26450,9 +26674,9 @@@
        {
          $output->writeListBegin(TType::STRUCT, count($this->success));
          {
-           foreach ($this->success as $iter903)
 -          foreach ($this->success as $iter902)
++          foreach ($this->success as $iter909)
            {
-             $xfer += $iter903->write($output);
 -            $xfer += $iter902->write($output);
++            $xfer += $iter909->write($output);
            }
          }
          $output->writeListEnd();
@@@ -26635,15 -26695,15 +26919,15 @@@ class ThriftHiveMetastore_get_part_spec
          case 0:
            if ($ftype == TType::LST) {
              $this->success = array();
-             $_size904 = 0;
-             $_etype907 = 0;
-             $xfer += $input->readListBegin($_etype907, $_size904);
-             for ($_i908 = 0; $_i908 < $_size904; ++$_i908)
 -            $_size903 = 0;
 -            $_etype906 = 0;
 -            $xfer += $input->readListBegin($_etype906, $_size903);
 -            for ($_i907 = 0; $_i907 < $_size903; ++$_i907)
++            $_size910 = 0;
++            $_etype913 = 0;
++            $xfer += $input->readListBegin($_etype913, $_size910);
++            for ($_i914 = 0; $_i914 < $_size910; ++$_i914)
              {
-               $elem909 = null;
-               $elem909 = new \metastore\PartitionSpec();
-               $xfer += $elem909->read($input);
-               $this->success []= $elem909;
 -              $elem908 = null;
 -              $elem908 = new \metastore\PartitionSpec();
 -              $xfer += $elem908->read($input);
 -              $this->success []= $elem908;
++              $elem915 = null;
++              $elem915 = new \metastore\PartitionSpec();
++              $xfer += $elem915->read($input);
++              $this->success []= $elem915;
              }
              $xfer += $input->readListEnd();
            } else {
@@@ -26687,9 -26747,9 +26971,9 @@@
        {
          $output->writeListBegin(TType::STRUCT, count($this->success));
          {
-           foreach ($this->success as $iter910)
 -          foreach ($this->success as $iter909)
++          foreach ($this->success as $iter916)
            {
-             $xfer += $iter910->write($output);
 -            $xfer += $iter909->write($output);
++            $xfer += $iter916->write($output);
            }
          }
          $output->writeListEnd();
@@@ -27255,14 -27315,14 +27539,14 @@@ class ThriftHiveMetastore_get_partition
          case 3:
            if ($ftype == TType::LST) {
              $this->names = array();
-             $_size911 = 0;
-             $_etype914 = 0;
-             $xfer += $input->readListBegin($_etype914, $_size911);
-             for ($_i915 = 0; $_i915 < $_size911; ++$_i915)
 -            $_size910 = 0;
 -            $_etype913 = 0;
 -            $xfer += $input->readListBegin($_etype913, $_size910);
 -            for ($_i914 = 0; $_i914 < $_size910; ++$_i914)
++            $_size917 = 0;
++            $_etype920 = 0;
++            $xfer += $input->readListBegin($_etype920, $_size917);
++            for ($_i921 = 0; $_i921 < $_size917; ++$_i921)
              {
-               $elem916 = null;
-               $xfer += $input->readString($elem916);
-               $this->names []= $elem916;
 -              $elem915 = null;
 -              $xfer += $input->readString($elem915);
 -              $this->names []= $elem915;
++              $elem922 = null;
++              $xfer += $input->readString($elem922);
++              $this->names []= $elem922;
              }
              $xfer += $input->readListEnd();
            } else {
@@@ -27300,9 -27360,9 +27584,9 @@@
        {
          $output->writeListBegin(TType::STRING, count($this->names));
          {
-           foreach ($this->names as $iter917)
 -          foreach ($this->names as $iter916)
++          foreach ($this->names as $iter923)
            {
-             $xfer += $output->writeString($iter917);
 -            $xfer += $output->writeString($iter916);
++            $xfer += $output->writeString($iter923);
            }
          }
          $output->writeListEnd();
@@@ -27391,15 -27451,15 +27675,15 @@@ class ThriftHiveMetastore_get_partition
          case 0:
            if ($ftype == TType::LST) {
              $this->success = array();
-             $_size918 = 0;
-             $_etype921 = 0;
-             $xfer += $in

<TRUNCATED>

[44/50] [abbrv] hive git commit: HIVE-14671 : merge master into hive-14535 (Wei Zheng)

Posted by we...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/1ceaf357/metastore/src/gen/thrift/gen-php/metastore/Types.php
----------------------------------------------------------------------
diff --cc metastore/src/gen/thrift/gen-php/metastore/Types.php
index 4dcfc76,74f0028..a3201cc
--- a/metastore/src/gen/thrift/gen-php/metastore/Types.php
+++ b/metastore/src/gen/thrift/gen-php/metastore/Types.php
@@@ -18228,882 -18277,17 +18266,882 @@@ class HeartbeatWriteIdRequest 
        $xfer += $output->writeString($this->dbName);
        $xfer += $output->writeFieldEnd();
      }
 -    if ($this->tblNames !== null) {
 -      if (!is_array($this->tblNames)) {
 +    if ($this->tblName !== null) {
 +      $xfer += $output->writeFieldBegin('tblName', TType::STRING, 2);
 +      $xfer += $output->writeString($this->tblName);
 +      $xfer += $output->writeFieldEnd();
 +    }
 +    if ($this->writeId !== null) {
 +      $xfer += $output->writeFieldBegin('writeId', TType::I64, 3);
 +      $xfer += $output->writeI64($this->writeId);
 +      $xfer += $output->writeFieldEnd();
 +    }
 +    $xfer += $output->writeFieldStop();
 +    $xfer += $output->writeStructEnd();
 +    return $xfer;
 +  }
 +
 +}
 +
 +class HeartbeatWriteIdResult {
 +  static $_TSPEC;
 +
 +
 +  public function __construct() {
 +    if (!isset(self::$_TSPEC)) {
 +      self::$_TSPEC = array(
 +        );
 +    }
 +  }
 +
 +  public function getName() {
 +    return 'HeartbeatWriteIdResult';
 +  }
 +
 +  public function read($input)
 +  {
 +    $xfer = 0;
 +    $fname = null;
 +    $ftype = 0;
 +    $fid = 0;
 +    $xfer += $input->readStructBegin($fname);
 +    while (true)
 +    {
 +      $xfer += $input->readFieldBegin($fname, $ftype, $fid);
 +      if ($ftype == TType::STOP) {
 +        break;
 +      }
 +      switch ($fid)
 +      {
 +        default:
 +          $xfer += $input->skip($ftype);
 +          break;
 +      }
 +      $xfer += $input->readFieldEnd();
 +    }
 +    $xfer += $input->readStructEnd();
 +    return $xfer;
 +  }
 +
 +  public function write($output) {
 +    $xfer = 0;
 +    $xfer += $output->writeStructBegin('HeartbeatWriteIdResult');
 +    $xfer += $output->writeFieldStop();
 +    $xfer += $output->writeStructEnd();
 +    return $xfer;
 +  }
 +
 +}
 +
 +class GetValidWriteIdsRequest {
 +  static $_TSPEC;
 +
 +  /**
 +   * @var string
 +   */
 +  public $dbName = null;
 +  /**
 +   * @var string
 +   */
 +  public $tblName = null;
 +
 +  public function __construct($vals=null) {
 +    if (!isset(self::$_TSPEC)) {
 +      self::$_TSPEC = array(
 +        1 => array(
 +          'var' => 'dbName',
 +          'type' => TType::STRING,
 +          ),
 +        2 => array(
 +          'var' => 'tblName',
 +          'type' => TType::STRING,
 +          ),
 +        );
 +    }
 +    if (is_array($vals)) {
 +      if (isset($vals['dbName'])) {
 +        $this->dbName = $vals['dbName'];
 +      }
 +      if (isset($vals['tblName'])) {
 +        $this->tblName = $vals['tblName'];
 +      }
 +    }
 +  }
 +
 +  public function getName() {
 +    return 'GetValidWriteIdsRequest';
 +  }
 +
 +  public function read($input)
 +  {
 +    $xfer = 0;
 +    $fname = null;
 +    $ftype = 0;
 +    $fid = 0;
 +    $xfer += $input->readStructBegin($fname);
 +    while (true)
 +    {
 +      $xfer += $input->readFieldBegin($fname, $ftype, $fid);
 +      if ($ftype == TType::STOP) {
 +        break;
 +      }
 +      switch ($fid)
 +      {
 +        case 1:
 +          if ($ftype == TType::STRING) {
 +            $xfer += $input->readString($this->dbName);
 +          } else {
 +            $xfer += $input->skip($ftype);
 +          }
 +          break;
 +        case 2:
 +          if ($ftype == TType::STRING) {
 +            $xfer += $input->readString($this->tblName);
 +          } else {
 +            $xfer += $input->skip($ftype);
 +          }
 +          break;
 +        default:
 +          $xfer += $input->skip($ftype);
 +          break;
 +      }
 +      $xfer += $input->readFieldEnd();
 +    }
 +    $xfer += $input->readStructEnd();
 +    return $xfer;
 +  }
 +
 +  public function write($output) {
 +    $xfer = 0;
 +    $xfer += $output->writeStructBegin('GetValidWriteIdsRequest');
 +    if ($this->dbName !== null) {
 +      $xfer += $output->writeFieldBegin('dbName', TType::STRING, 1);
 +      $xfer += $output->writeString($this->dbName);
 +      $xfer += $output->writeFieldEnd();
 +    }
 +    if ($this->tblName !== null) {
 +      $xfer += $output->writeFieldBegin('tblName', TType::STRING, 2);
 +      $xfer += $output->writeString($this->tblName);
 +      $xfer += $output->writeFieldEnd();
 +    }
 +    $xfer += $output->writeFieldStop();
 +    $xfer += $output->writeStructEnd();
 +    return $xfer;
 +  }
 +
 +}
 +
 +class GetValidWriteIdsResult {
 +  static $_TSPEC;
 +
 +  /**
 +   * @var int
 +   */
 +  public $lowWatermarkId = null;
 +  /**
 +   * @var int
 +   */
 +  public $highWatermarkId = null;
 +  /**
 +   * @var bool
 +   */
 +  public $areIdsValid = null;
 +  /**
 +   * @var int[]
 +   */
 +  public $ids = null;
 +
 +  public function __construct($vals=null) {
 +    if (!isset(self::$_TSPEC)) {
 +      self::$_TSPEC = array(
 +        1 => array(
 +          'var' => 'lowWatermarkId',
 +          'type' => TType::I64,
 +          ),
 +        2 => array(
 +          'var' => 'highWatermarkId',
 +          'type' => TType::I64,
 +          ),
 +        3 => array(
 +          'var' => 'areIdsValid',
 +          'type' => TType::BOOL,
 +          ),
 +        4 => array(
 +          'var' => 'ids',
 +          'type' => TType::LST,
 +          'etype' => TType::I64,
 +          'elem' => array(
 +            'type' => TType::I64,
 +            ),
 +          ),
 +        );
 +    }
 +    if (is_array($vals)) {
 +      if (isset($vals['lowWatermarkId'])) {
 +        $this->lowWatermarkId = $vals['lowWatermarkId'];
 +      }
 +      if (isset($vals['highWatermarkId'])) {
 +        $this->highWatermarkId = $vals['highWatermarkId'];
 +      }
 +      if (isset($vals['areIdsValid'])) {
 +        $this->areIdsValid = $vals['areIdsValid'];
 +      }
 +      if (isset($vals['ids'])) {
 +        $this->ids = $vals['ids'];
 +      }
 +    }
 +  }
 +
 +  public function getName() {
 +    return 'GetValidWriteIdsResult';
 +  }
 +
 +  public function read($input)
 +  {
 +    $xfer = 0;
 +    $fname = null;
 +    $ftype = 0;
 +    $fid = 0;
 +    $xfer += $input->readStructBegin($fname);
 +    while (true)
 +    {
 +      $xfer += $input->readFieldBegin($fname, $ftype, $fid);
 +      if ($ftype == TType::STOP) {
 +        break;
 +      }
 +      switch ($fid)
 +      {
 +        case 1:
 +          if ($ftype == TType::I64) {
 +            $xfer += $input->readI64($this->lowWatermarkId);
 +          } else {
 +            $xfer += $input->skip($ftype);
 +          }
 +          break;
 +        case 2:
 +          if ($ftype == TType::I64) {
 +            $xfer += $input->readI64($this->highWatermarkId);
 +          } else {
 +            $xfer += $input->skip($ftype);
 +          }
 +          break;
 +        case 3:
 +          if ($ftype == TType::BOOL) {
 +            $xfer += $input->readBool($this->areIdsValid);
 +          } else {
 +            $xfer += $input->skip($ftype);
 +          }
 +          break;
 +        case 4:
 +          if ($ftype == TType::LST) {
 +            $this->ids = array();
-             $_size569 = 0;
-             $_etype572 = 0;
-             $xfer += $input->readListBegin($_etype572, $_size569);
-             for ($_i573 = 0; $_i573 < $_size569; ++$_i573)
++            $_size568 = 0;
++            $_etype571 = 0;
++            $xfer += $input->readListBegin($_etype571, $_size568);
++            for ($_i572 = 0; $_i572 < $_size568; ++$_i572)
 +            {
-               $elem574 = null;
-               $xfer += $input->readI64($elem574);
-               $this->ids []= $elem574;
++              $elem573 = null;
++              $xfer += $input->readI64($elem573);
++              $this->ids []= $elem573;
 +            }
 +            $xfer += $input->readListEnd();
 +          } else {
 +            $xfer += $input->skip($ftype);
 +          }
 +          break;
 +        default:
 +          $xfer += $input->skip($ftype);
 +          break;
 +      }
 +      $xfer += $input->readFieldEnd();
 +    }
 +    $xfer += $input->readStructEnd();
 +    return $xfer;
 +  }
 +
 +  public function write($output) {
 +    $xfer = 0;
 +    $xfer += $output->writeStructBegin('GetValidWriteIdsResult');
 +    if ($this->lowWatermarkId !== null) {
 +      $xfer += $output->writeFieldBegin('lowWatermarkId', TType::I64, 1);
 +      $xfer += $output->writeI64($this->lowWatermarkId);
 +      $xfer += $output->writeFieldEnd();
 +    }
 +    if ($this->highWatermarkId !== null) {
 +      $xfer += $output->writeFieldBegin('highWatermarkId', TType::I64, 2);
 +      $xfer += $output->writeI64($this->highWatermarkId);
 +      $xfer += $output->writeFieldEnd();
 +    }
 +    if ($this->areIdsValid !== null) {
 +      $xfer += $output->writeFieldBegin('areIdsValid', TType::BOOL, 3);
 +      $xfer += $output->writeBool($this->areIdsValid);
 +      $xfer += $output->writeFieldEnd();
 +    }
 +    if ($this->ids !== null) {
 +      if (!is_array($this->ids)) {
 +        throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA);
 +      }
 +      $xfer += $output->writeFieldBegin('ids', TType::LST, 4);
 +      {
 +        $output->writeListBegin(TType::I64, count($this->ids));
 +        {
-           foreach ($this->ids as $iter575)
++          foreach ($this->ids as $iter574)
 +          {
-             $xfer += $output->writeI64($iter575);
++            $xfer += $output->writeI64($iter574);
 +          }
 +        }
 +        $output->writeListEnd();
 +      }
 +      $xfer += $output->writeFieldEnd();
 +    }
 +    $xfer += $output->writeFieldStop();
 +    $xfer += $output->writeStructEnd();
 +    return $xfer;
 +  }
 +
 +}
 +
 +class GetAllFunctionsResponse {
 +  static $_TSPEC;
 +
 +  /**
 +   * @var \metastore\Function[]
 +   */
 +  public $functions = null;
 +
 +  public function __construct($vals=null) {
 +    if (!isset(self::$_TSPEC)) {
 +      self::$_TSPEC = array(
 +        1 => array(
 +          'var' => 'functions',
 +          'type' => TType::LST,
 +          'etype' => TType::STRUCT,
 +          'elem' => array(
 +            'type' => TType::STRUCT,
 +            'class' => '\metastore\Function',
 +            ),
 +          ),
 +        );
 +    }
 +    if (is_array($vals)) {
 +      if (isset($vals['functions'])) {
 +        $this->functions = $vals['functions'];
 +      }
 +    }
 +  }
 +
 +  public function getName() {
 +    return 'GetAllFunctionsResponse';
 +  }
 +
 +  public function read($input)
 +  {
 +    $xfer = 0;
 +    $fname = null;
 +    $ftype = 0;
 +    $fid = 0;
 +    $xfer += $input->readStructBegin($fname);
 +    while (true)
 +    {
 +      $xfer += $input->readFieldBegin($fname, $ftype, $fid);
 +      if ($ftype == TType::STOP) {
 +        break;
 +      }
 +      switch ($fid)
 +      {
 +        case 1:
 +          if ($ftype == TType::LST) {
 +            $this->functions = array();
-             $_size576 = 0;
-             $_etype579 = 0;
-             $xfer += $input->readListBegin($_etype579, $_size576);
-             for ($_i580 = 0; $_i580 < $_size576; ++$_i580)
++            $_size575 = 0;
++            $_etype578 = 0;
++            $xfer += $input->readListBegin($_etype578, $_size575);
++            for ($_i579 = 0; $_i579 < $_size575; ++$_i579)
 +            {
-               $elem581 = null;
-               $elem581 = new \metastore\Function();
-               $xfer += $elem581->read($input);
-               $this->functions []= $elem581;
++              $elem580 = null;
++              $elem580 = new \metastore\Function();
++              $xfer += $elem580->read($input);
++              $this->functions []= $elem580;
 +            }
 +            $xfer += $input->readListEnd();
 +          } else {
 +            $xfer += $input->skip($ftype);
 +          }
 +          break;
 +        default:
 +          $xfer += $input->skip($ftype);
 +          break;
 +      }
 +      $xfer += $input->readFieldEnd();
 +    }
 +    $xfer += $input->readStructEnd();
 +    return $xfer;
 +  }
 +
 +  public function write($output) {
 +    $xfer = 0;
 +    $xfer += $output->writeStructBegin('GetAllFunctionsResponse');
 +    if ($this->functions !== null) {
 +      if (!is_array($this->functions)) {
 +        throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA);
 +      }
 +      $xfer += $output->writeFieldBegin('functions', TType::LST, 1);
 +      {
 +        $output->writeListBegin(TType::STRUCT, count($this->functions));
 +        {
-           foreach ($this->functions as $iter582)
++          foreach ($this->functions as $iter581)
 +          {
-             $xfer += $iter582->write($output);
++            $xfer += $iter581->write($output);
 +          }
 +        }
 +        $output->writeListEnd();
 +      }
 +      $xfer += $output->writeFieldEnd();
 +    }
 +    $xfer += $output->writeFieldStop();
 +    $xfer += $output->writeStructEnd();
 +    return $xfer;
 +  }
 +
 +}
 +
 +class ClientCapabilities {
 +  static $_TSPEC;
 +
 +  /**
 +   * @var int[]
 +   */
 +  public $values = null;
 +
 +  public function __construct($vals=null) {
 +    if (!isset(self::$_TSPEC)) {
 +      self::$_TSPEC = array(
 +        1 => array(
 +          'var' => 'values',
 +          'type' => TType::LST,
 +          'etype' => TType::I32,
 +          'elem' => array(
 +            'type' => TType::I32,
 +            ),
 +          ),
 +        );
 +    }
 +    if (is_array($vals)) {
 +      if (isset($vals['values'])) {
 +        $this->values = $vals['values'];
 +      }
 +    }
 +  }
 +
 +  public function getName() {
 +    return 'ClientCapabilities';
 +  }
 +
 +  public function read($input)
 +  {
 +    $xfer = 0;
 +    $fname = null;
 +    $ftype = 0;
 +    $fid = 0;
 +    $xfer += $input->readStructBegin($fname);
 +    while (true)
 +    {
 +      $xfer += $input->readFieldBegin($fname, $ftype, $fid);
 +      if ($ftype == TType::STOP) {
 +        break;
 +      }
 +      switch ($fid)
 +      {
 +        case 1:
 +          if ($ftype == TType::LST) {
 +            $this->values = array();
-             $_size583 = 0;
-             $_etype586 = 0;
-             $xfer += $input->readListBegin($_etype586, $_size583);
-             for ($_i587 = 0; $_i587 < $_size583; ++$_i587)
++            $_size582 = 0;
++            $_etype585 = 0;
++            $xfer += $input->readListBegin($_etype585, $_size582);
++            for ($_i586 = 0; $_i586 < $_size582; ++$_i586)
 +            {
-               $elem588 = null;
-               $xfer += $input->readI32($elem588);
-               $this->values []= $elem588;
++              $elem587 = null;
++              $xfer += $input->readI32($elem587);
++              $this->values []= $elem587;
 +            }
 +            $xfer += $input->readListEnd();
 +          } else {
 +            $xfer += $input->skip($ftype);
 +          }
 +          break;
 +        default:
 +          $xfer += $input->skip($ftype);
 +          break;
 +      }
 +      $xfer += $input->readFieldEnd();
 +    }
 +    $xfer += $input->readStructEnd();
 +    return $xfer;
 +  }
 +
 +  public function write($output) {
 +    $xfer = 0;
 +    $xfer += $output->writeStructBegin('ClientCapabilities');
 +    if ($this->values !== null) {
 +      if (!is_array($this->values)) {
 +        throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA);
 +      }
 +      $xfer += $output->writeFieldBegin('values', TType::LST, 1);
 +      {
 +        $output->writeListBegin(TType::I32, count($this->values));
 +        {
-           foreach ($this->values as $iter589)
++          foreach ($this->values as $iter588)
 +          {
-             $xfer += $output->writeI32($iter589);
++            $xfer += $output->writeI32($iter588);
 +          }
 +        }
 +        $output->writeListEnd();
 +      }
 +      $xfer += $output->writeFieldEnd();
 +    }
 +    $xfer += $output->writeFieldStop();
 +    $xfer += $output->writeStructEnd();
 +    return $xfer;
 +  }
 +
 +}
 +
 +class GetTableRequest {
 +  static $_TSPEC;
 +
 +  /**
 +   * @var string
 +   */
 +  public $dbName = null;
 +  /**
 +   * @var string
 +   */
 +  public $tblName = null;
 +  /**
 +   * @var \metastore\ClientCapabilities
 +   */
 +  public $capabilities = null;
 +
 +  public function __construct($vals=null) {
 +    if (!isset(self::$_TSPEC)) {
 +      self::$_TSPEC = array(
 +        1 => array(
 +          'var' => 'dbName',
 +          'type' => TType::STRING,
 +          ),
 +        2 => array(
 +          'var' => 'tblName',
 +          'type' => TType::STRING,
 +          ),
 +        3 => array(
 +          'var' => 'capabilities',
 +          'type' => TType::STRUCT,
 +          'class' => '\metastore\ClientCapabilities',
 +          ),
 +        );
 +    }
 +    if (is_array($vals)) {
 +      if (isset($vals['dbName'])) {
 +        $this->dbName = $vals['dbName'];
 +      }
 +      if (isset($vals['tblName'])) {
 +        $this->tblName = $vals['tblName'];
 +      }
 +      if (isset($vals['capabilities'])) {
 +        $this->capabilities = $vals['capabilities'];
 +      }
 +    }
 +  }
 +
 +  public function getName() {
 +    return 'GetTableRequest';
 +  }
 +
 +  public function read($input)
 +  {
 +    $xfer = 0;
 +    $fname = null;
 +    $ftype = 0;
 +    $fid = 0;
 +    $xfer += $input->readStructBegin($fname);
 +    while (true)
 +    {
 +      $xfer += $input->readFieldBegin($fname, $ftype, $fid);
 +      if ($ftype == TType::STOP) {
 +        break;
 +      }
 +      switch ($fid)
 +      {
 +        case 1:
 +          if ($ftype == TType::STRING) {
 +            $xfer += $input->readString($this->dbName);
 +          } else {
 +            $xfer += $input->skip($ftype);
 +          }
 +          break;
 +        case 2:
 +          if ($ftype == TType::STRING) {
 +            $xfer += $input->readString($this->tblName);
 +          } else {
 +            $xfer += $input->skip($ftype);
 +          }
 +          break;
 +        case 3:
 +          if ($ftype == TType::STRUCT) {
 +            $this->capabilities = new \metastore\ClientCapabilities();
 +            $xfer += $this->capabilities->read($input);
 +          } else {
 +            $xfer += $input->skip($ftype);
 +          }
 +          break;
 +        default:
 +          $xfer += $input->skip($ftype);
 +          break;
 +      }
 +      $xfer += $input->readFieldEnd();
 +    }
 +    $xfer += $input->readStructEnd();
 +    return $xfer;
 +  }
 +
 +  public function write($output) {
 +    $xfer = 0;
 +    $xfer += $output->writeStructBegin('GetTableRequest');
 +    if ($this->dbName !== null) {
 +      $xfer += $output->writeFieldBegin('dbName', TType::STRING, 1);
 +      $xfer += $output->writeString($this->dbName);
 +      $xfer += $output->writeFieldEnd();
 +    }
 +    if ($this->tblName !== null) {
 +      $xfer += $output->writeFieldBegin('tblName', TType::STRING, 2);
 +      $xfer += $output->writeString($this->tblName);
 +      $xfer += $output->writeFieldEnd();
 +    }
 +    if ($this->capabilities !== null) {
 +      if (!is_object($this->capabilities)) {
 +        throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA);
 +      }
 +      $xfer += $output->writeFieldBegin('capabilities', TType::STRUCT, 3);
 +      $xfer += $this->capabilities->write($output);
 +      $xfer += $output->writeFieldEnd();
 +    }
 +    $xfer += $output->writeFieldStop();
 +    $xfer += $output->writeStructEnd();
 +    return $xfer;
 +  }
 +
 +}
 +
 +class GetTableResult {
 +  static $_TSPEC;
 +
 +  /**
 +   * @var \metastore\Table
 +   */
 +  public $table = null;
 +
 +  public function __construct($vals=null) {
 +    if (!isset(self::$_TSPEC)) {
 +      self::$_TSPEC = array(
 +        1 => array(
 +          'var' => 'table',
 +          'type' => TType::STRUCT,
 +          'class' => '\metastore\Table',
 +          ),
 +        );
 +    }
 +    if (is_array($vals)) {
 +      if (isset($vals['table'])) {
 +        $this->table = $vals['table'];
 +      }
 +    }
 +  }
 +
 +  public function getName() {
 +    return 'GetTableResult';
 +  }
 +
 +  public function read($input)
 +  {
 +    $xfer = 0;
 +    $fname = null;
 +    $ftype = 0;
 +    $fid = 0;
 +    $xfer += $input->readStructBegin($fname);
 +    while (true)
 +    {
 +      $xfer += $input->readFieldBegin($fname, $ftype, $fid);
 +      if ($ftype == TType::STOP) {
 +        break;
 +      }
 +      switch ($fid)
 +      {
 +        case 1:
 +          if ($ftype == TType::STRUCT) {
 +            $this->table = new \metastore\Table();
 +            $xfer += $this->table->read($input);
 +          } else {
 +            $xfer += $input->skip($ftype);
 +          }
 +          break;
 +        default:
 +          $xfer += $input->skip($ftype);
 +          break;
 +      }
 +      $xfer += $input->readFieldEnd();
 +    }
 +    $xfer += $input->readStructEnd();
 +    return $xfer;
 +  }
 +
 +  public function write($output) {
 +    $xfer = 0;
 +    $xfer += $output->writeStructBegin('GetTableResult');
 +    if ($this->table !== null) {
 +      if (!is_object($this->table)) {
 +        throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA);
 +      }
 +      $xfer += $output->writeFieldBegin('table', TType::STRUCT, 1);
 +      $xfer += $this->table->write($output);
 +      $xfer += $output->writeFieldEnd();
 +    }
 +    $xfer += $output->writeFieldStop();
 +    $xfer += $output->writeStructEnd();
 +    return $xfer;
 +  }
 +
 +}
 +
 +class GetTablesRequest {
 +  static $_TSPEC;
 +
 +  /**
 +   * @var string
 +   */
 +  public $dbName = null;
 +  /**
 +   * @var string[]
 +   */
 +  public $tblNames = null;
 +  /**
 +   * @var \metastore\ClientCapabilities
 +   */
 +  public $capabilities = null;
 +
 +  public function __construct($vals=null) {
 +    if (!isset(self::$_TSPEC)) {
 +      self::$_TSPEC = array(
 +        1 => array(
 +          'var' => 'dbName',
 +          'type' => TType::STRING,
 +          ),
 +        2 => array(
 +          'var' => 'tblNames',
 +          'type' => TType::LST,
 +          'etype' => TType::STRING,
 +          'elem' => array(
 +            'type' => TType::STRING,
 +            ),
 +          ),
 +        3 => array(
 +          'var' => 'capabilities',
 +          'type' => TType::STRUCT,
 +          'class' => '\metastore\ClientCapabilities',
 +          ),
 +        );
 +    }
 +    if (is_array($vals)) {
 +      if (isset($vals['dbName'])) {
 +        $this->dbName = $vals['dbName'];
 +      }
 +      if (isset($vals['tblNames'])) {
 +        $this->tblNames = $vals['tblNames'];
 +      }
 +      if (isset($vals['capabilities'])) {
 +        $this->capabilities = $vals['capabilities'];
 +      }
 +    }
 +  }
 +
 +  public function getName() {
 +    return 'GetTablesRequest';
 +  }
 +
 +  public function read($input)
 +  {
 +    $xfer = 0;
 +    $fname = null;
 +    $ftype = 0;
 +    $fid = 0;
 +    $xfer += $input->readStructBegin($fname);
 +    while (true)
 +    {
 +      $xfer += $input->readFieldBegin($fname, $ftype, $fid);
 +      if ($ftype == TType::STOP) {
 +        break;
 +      }
 +      switch ($fid)
 +      {
 +        case 1:
 +          if ($ftype == TType::STRING) {
 +            $xfer += $input->readString($this->dbName);
 +          } else {
 +            $xfer += $input->skip($ftype);
 +          }
 +          break;
 +        case 2:
 +          if ($ftype == TType::LST) {
 +            $this->tblNames = array();
-             $_size590 = 0;
-             $_etype593 = 0;
-             $xfer += $input->readListBegin($_etype593, $_size590);
-             for ($_i594 = 0; $_i594 < $_size590; ++$_i594)
++            $_size589 = 0;
++            $_etype592 = 0;
++            $xfer += $input->readListBegin($_etype592, $_size589);
++            for ($_i593 = 0; $_i593 < $_size589; ++$_i593)
 +            {
-               $elem595 = null;
-               $xfer += $input->readString($elem595);
-               $this->tblNames []= $elem595;
++              $elem594 = null;
++              $xfer += $input->readString($elem594);
++              $this->tblNames []= $elem594;
 +            }
 +            $xfer += $input->readListEnd();
 +          } else {
 +            $xfer += $input->skip($ftype);
 +          }
 +          break;
 +        case 3:
 +          if ($ftype == TType::STRUCT) {
 +            $this->capabilities = new \metastore\ClientCapabilities();
 +            $xfer += $this->capabilities->read($input);
 +          } else {
 +            $xfer += $input->skip($ftype);
 +          }
 +          break;
 +        default:
 +          $xfer += $input->skip($ftype);
 +          break;
 +      }
 +      $xfer += $input->readFieldEnd();
 +    }
 +    $xfer += $input->readStructEnd();
 +    return $xfer;
 +  }
 +
 +  public function write($output) {
 +    $xfer = 0;
 +    $xfer += $output->writeStructBegin('GetTablesRequest');
 +    if ($this->dbName !== null) {
 +      $xfer += $output->writeFieldBegin('dbName', TType::STRING, 1);
 +      $xfer += $output->writeString($this->dbName);
 +      $xfer += $output->writeFieldEnd();
 +    }
 +    if ($this->tblNames !== null) {
 +      if (!is_array($this->tblNames)) {
          throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA);
        }
        $xfer += $output->writeFieldBegin('tblNames', TType::LST, 2);
        {
          $output->writeListBegin(TType::STRING, count($this->tblNames));
          {
-           foreach ($this->tblNames as $iter596)
 -          foreach ($this->tblNames as $iter588)
++          foreach ($this->tblNames as $iter595)
            {
-             $xfer += $output->writeString($iter596);
 -            $xfer += $output->writeString($iter588);
++            $xfer += $output->writeString($iter595);
            }
          }
          $output->writeListEnd();
@@@ -19176,15 -18360,15 +19214,15 @@@ class GetTablesResult 
          case 1:
            if ($ftype == TType::LST) {
              $this->tables = array();
-             $_size597 = 0;
-             $_etype600 = 0;
-             $xfer += $input->readListBegin($_etype600, $_size597);
-             for ($_i601 = 0; $_i601 < $_size597; ++$_i601)
 -            $_size589 = 0;
 -            $_etype592 = 0;
 -            $xfer += $input->readListBegin($_etype592, $_size589);
 -            for ($_i593 = 0; $_i593 < $_size589; ++$_i593)
++            $_size596 = 0;
++            $_etype599 = 0;
++            $xfer += $input->readListBegin($_etype599, $_size596);
++            for ($_i600 = 0; $_i600 < $_size596; ++$_i600)
              {
-               $elem602 = null;
-               $elem602 = new \metastore\Table();
-               $xfer += $elem602->read($input);
-               $this->tables []= $elem602;
 -              $elem594 = null;
 -              $elem594 = new \metastore\Table();
 -              $xfer += $elem594->read($input);
 -              $this->tables []= $elem594;
++              $elem601 = null;
++              $elem601 = new \metastore\Table();
++              $xfer += $elem601->read($input);
++              $this->tables []= $elem601;
              }
              $xfer += $input->readListEnd();
            } else {
@@@ -19212,9 -18396,9 +19250,9 @@@
        {
          $output->writeListBegin(TType::STRUCT, count($this->tables));
          {
-           foreach ($this->tables as $iter603)
 -          foreach ($this->tables as $iter595)
++          foreach ($this->tables as $iter602)
            {
-             $xfer += $iter603->write($output);
 -            $xfer += $iter595->write($output);
++            $xfer += $iter602->write($output);
            }
          }
          $output->writeListEnd();

http://git-wip-us.apache.org/repos/asf/hive/blob/1ceaf357/metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote
----------------------------------------------------------------------
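
For reference, a minimal sketch of how the regenerated structs above could be exercised from a Thrift PHP client. This is illustrative only and not part of the commit: it assumes the \metastore types generated above are autoloaded, that the bundled Thrift PHP runtime provides TMemoryBuffer and TBinaryProtocol under those names, and the capability id and table name are placeholder values, not taken from this diff.

<?php
// Illustrative sketch only; assumes the generated \metastore types and the
// Thrift PHP runtime (TMemoryBuffer, TBinaryProtocol) are on the include path.
$caps = new \metastore\ClientCapabilities(array(
  'values' => array(1),            // hypothetical capability id (assumption)
));
$req = new \metastore\GetTableRequest(array(
  'dbName' => 'default',
  'tblName' => 'part_mm',          // any table name; placeholder only
  'capabilities' => $caps,
));
$buffer = new TMemoryBuffer();
$protocol = new TBinaryProtocol($buffer);
$req->write($protocol);            // invokes the generated write() shown above
echo strlen($buffer->getBuffer()) . " bytes serialized\n";
?>

Deserializing would mirror the generated read($input) path, with skip($ftype) covering any field ids the reader does not recognize.
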


[39/50] [abbrv] hive git commit: HIVE-14671 : merge master into hive-14535 (Wei Zheng)

Posted by we...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/1ceaf357/ql/src/test/results/clientpositive/llap/mm_all.q.out
----------------------------------------------------------------------
diff --cc ql/src/test/results/clientpositive/llap/mm_all.q.out
index 062d60f,0000000..49bb8cf
mode 100644,000000..100644
--- a/ql/src/test/results/clientpositive/llap/mm_all.q.out
+++ b/ql/src/test/results/clientpositive/llap/mm_all.q.out
@@@ -1,3241 -1,0 +1,3141 @@@
 +PREHOOK: query: drop table intermediate
 +PREHOOK: type: DROPTABLE
 +POSTHOOK: query: drop table intermediate
 +POSTHOOK: type: DROPTABLE
 +PREHOOK: query: create table intermediate(key int) partitioned by (p int) stored as orc
 +PREHOOK: type: CREATETABLE
 +PREHOOK: Output: database:default
 +PREHOOK: Output: default@intermediate
 +POSTHOOK: query: create table intermediate(key int) partitioned by (p int) stored as orc
 +POSTHOOK: type: CREATETABLE
 +POSTHOOK: Output: database:default
 +POSTHOOK: Output: default@intermediate
 +PREHOOK: query: insert into table intermediate partition(p='455') select distinct key from src where key >= 0 order by key desc limit 2
 +PREHOOK: type: QUERY
 +PREHOOK: Input: default@src
 +PREHOOK: Output: default@intermediate@p=455
 +POSTHOOK: query: insert into table intermediate partition(p='455') select distinct key from src where key >= 0 order by key desc limit 2
 +POSTHOOK: type: QUERY
 +POSTHOOK: Input: default@src
 +POSTHOOK: Output: default@intermediate@p=455
 +POSTHOOK: Lineage: intermediate PARTITION(p=455).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
 +PREHOOK: query: insert into table intermediate partition(p='456') select distinct key from src where key is not null order by key asc limit 2
 +PREHOOK: type: QUERY
 +PREHOOK: Input: default@src
 +PREHOOK: Output: default@intermediate@p=456
 +POSTHOOK: query: insert into table intermediate partition(p='456') select distinct key from src where key is not null order by key asc limit 2
 +POSTHOOK: type: QUERY
 +POSTHOOK: Input: default@src
 +POSTHOOK: Output: default@intermediate@p=456
 +POSTHOOK: Lineage: intermediate PARTITION(p=456).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
 +PREHOOK: query: insert into table intermediate partition(p='457') select distinct key from src where key >= 100 order by key asc limit 2
 +PREHOOK: type: QUERY
 +PREHOOK: Input: default@src
 +PREHOOK: Output: default@intermediate@p=457
 +POSTHOOK: query: insert into table intermediate partition(p='457') select distinct key from src where key >= 100 order by key asc limit 2
 +POSTHOOK: type: QUERY
 +POSTHOOK: Input: default@src
 +POSTHOOK: Output: default@intermediate@p=457
 +POSTHOOK: Lineage: intermediate PARTITION(p=457).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
 +PREHOOK: query: drop table part_mm
 +PREHOOK: type: DROPTABLE
 +POSTHOOK: query: drop table part_mm
 +POSTHOOK: type: DROPTABLE
 +PREHOOK: query: create table part_mm(key int) partitioned by (key_mm int) stored as orc tblproperties ("transactional"="true", "transactional_properties"="insert_only")
 +PREHOOK: type: CREATETABLE
 +PREHOOK: Output: database:default
 +PREHOOK: Output: default@part_mm
 +POSTHOOK: query: create table part_mm(key int) partitioned by (key_mm int) stored as orc tblproperties ("transactional"="true", "transactional_properties"="insert_only")
 +POSTHOOK: type: CREATETABLE
 +POSTHOOK: Output: database:default
 +POSTHOOK: Output: default@part_mm
 +PREHOOK: query: explain insert into table part_mm partition(key_mm=455) select key from intermediate
 +PREHOOK: type: QUERY
 +POSTHOOK: query: explain insert into table part_mm partition(key_mm=455) select key from intermediate
 +POSTHOOK: type: QUERY
 +STAGE DEPENDENCIES:
 +  Stage-1 is a root stage
 +  Stage-2 depends on stages: Stage-1
 +  Stage-0 depends on stages: Stage-2
 +  Stage-3 depends on stages: Stage-0
 +
 +STAGE PLANS:
 +  Stage: Stage-1
 +    Tez
 +#### A masked pattern was here ####
 +      Vertices:
 +        Map 1 
 +            Map Operator Tree:
 +                TableScan
 +                  alias: intermediate
 +                  Statistics: Num rows: 6 Data size: 72 Basic stats: COMPLETE Column stats: NONE
 +                  Select Operator
 +                    expressions: key (type: int)
 +                    outputColumnNames: _col0
 +                    Statistics: Num rows: 6 Data size: 72 Basic stats: COMPLETE Column stats: NONE
 +                    File Output Operator
 +                      compressed: false
 +                      Statistics: Num rows: 6 Data size: 72 Basic stats: COMPLETE Column stats: NONE
 +                      table:
 +                          input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
 +                          output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
 +                          serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
 +                          name: default.part_mm
 +            Execution mode: llap
 +            LLAP IO: all inputs
 +
 +  Stage: Stage-2
 +    Dependency Collection
 +
 +  Stage: Stage-0
 +    Move Operator
 +      tables:
 +          partition:
 +            key_mm 455
 +          replace: false
 +          table:
 +              input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
 +              output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
 +              serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
 +              name: default.part_mm
 +          micromanaged table: true
 +
 +  Stage: Stage-3
 +    Stats-Aggr Operator
 +
 +PREHOOK: query: insert into table part_mm partition(key_mm=455) select key from intermediate
 +PREHOOK: type: QUERY
 +PREHOOK: Input: default@intermediate
 +PREHOOK: Input: default@intermediate@p=455
 +PREHOOK: Input: default@intermediate@p=456
 +PREHOOK: Input: default@intermediate@p=457
 +PREHOOK: Output: default@part_mm@key_mm=455
 +POSTHOOK: query: insert into table part_mm partition(key_mm=455) select key from intermediate
 +POSTHOOK: type: QUERY
 +POSTHOOK: Input: default@intermediate
 +POSTHOOK: Input: default@intermediate@p=455
 +POSTHOOK: Input: default@intermediate@p=456
 +POSTHOOK: Input: default@intermediate@p=457
 +POSTHOOK: Output: default@part_mm@key_mm=455
 +POSTHOOK: Lineage: part_mm PARTITION(key_mm=455).key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +PREHOOK: query: insert into table part_mm partition(key_mm=456) select key from intermediate
 +PREHOOK: type: QUERY
 +PREHOOK: Input: default@intermediate
 +PREHOOK: Input: default@intermediate@p=455
 +PREHOOK: Input: default@intermediate@p=456
 +PREHOOK: Input: default@intermediate@p=457
 +PREHOOK: Output: default@part_mm@key_mm=456
 +POSTHOOK: query: insert into table part_mm partition(key_mm=456) select key from intermediate
 +POSTHOOK: type: QUERY
 +POSTHOOK: Input: default@intermediate
 +POSTHOOK: Input: default@intermediate@p=455
 +POSTHOOK: Input: default@intermediate@p=456
 +POSTHOOK: Input: default@intermediate@p=457
 +POSTHOOK: Output: default@part_mm@key_mm=456
 +POSTHOOK: Lineage: part_mm PARTITION(key_mm=456).key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +PREHOOK: query: insert into table part_mm partition(key_mm=455) select key from intermediate
 +PREHOOK: type: QUERY
 +PREHOOK: Input: default@intermediate
 +PREHOOK: Input: default@intermediate@p=455
 +PREHOOK: Input: default@intermediate@p=456
 +PREHOOK: Input: default@intermediate@p=457
 +PREHOOK: Output: default@part_mm@key_mm=455
 +POSTHOOK: query: insert into table part_mm partition(key_mm=455) select key from intermediate
 +POSTHOOK: type: QUERY
 +POSTHOOK: Input: default@intermediate
 +POSTHOOK: Input: default@intermediate@p=455
 +POSTHOOK: Input: default@intermediate@p=456
 +POSTHOOK: Input: default@intermediate@p=457
 +POSTHOOK: Output: default@part_mm@key_mm=455
 +POSTHOOK: Lineage: part_mm PARTITION(key_mm=455).key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +PREHOOK: query: select * from part_mm order by key, key_mm
 +PREHOOK: type: QUERY
 +PREHOOK: Input: default@part_mm
 +PREHOOK: Input: default@part_mm@key_mm=455
 +PREHOOK: Input: default@part_mm@key_mm=456
 +#### A masked pattern was here ####
 +POSTHOOK: query: select * from part_mm order by key, key_mm
 +POSTHOOK: type: QUERY
 +POSTHOOK: Input: default@part_mm
 +POSTHOOK: Input: default@part_mm@key_mm=455
 +POSTHOOK: Input: default@part_mm@key_mm=456
 +#### A masked pattern was here ####
 +0	455
 +0	455
 +0	456
 +10	455
 +10	455
 +10	456
 +97	455
 +97	455
 +97	456
 +98	455
 +98	455
 +98	456
 +100	455
 +100	455
 +100	456
 +103	455
 +103	455
 +103	456
 +PREHOOK: query: select * from part_mm order by key, key_mm
 +PREHOOK: type: QUERY
 +PREHOOK: Input: default@part_mm
 +PREHOOK: Input: default@part_mm@key_mm=455
 +PREHOOK: Input: default@part_mm@key_mm=456
 +#### A masked pattern was here ####
 +POSTHOOK: query: select * from part_mm order by key, key_mm
 +POSTHOOK: type: QUERY
 +POSTHOOK: Input: default@part_mm
 +POSTHOOK: Input: default@part_mm@key_mm=455
 +POSTHOOK: Input: default@part_mm@key_mm=456
 +#### A masked pattern was here ####
 +0	455
 +0	455
 +0	456
 +10	455
 +10	455
 +10	456
 +97	455
 +97	455
 +97	456
 +98	455
 +98	455
 +98	456
 +100	455
 +100	455
 +100	456
 +103	455
 +103	455
 +103	456
 +PREHOOK: query: truncate table part_mm
 +PREHOOK: type: TRUNCATETABLE
 +PREHOOK: Output: default@part_mm@key_mm=455
 +PREHOOK: Output: default@part_mm@key_mm=456
 +POSTHOOK: query: truncate table part_mm
 +POSTHOOK: type: TRUNCATETABLE
 +POSTHOOK: Output: default@part_mm@key_mm=455
 +POSTHOOK: Output: default@part_mm@key_mm=456
 +PREHOOK: query: select * from part_mm order by key, key_mm
 +PREHOOK: type: QUERY
 +PREHOOK: Input: default@part_mm
 +PREHOOK: Input: default@part_mm@key_mm=455
 +PREHOOK: Input: default@part_mm@key_mm=456
 +#### A masked pattern was here ####
 +POSTHOOK: query: select * from part_mm order by key, key_mm
 +POSTHOOK: type: QUERY
 +POSTHOOK: Input: default@part_mm
 +POSTHOOK: Input: default@part_mm@key_mm=455
 +POSTHOOK: Input: default@part_mm@key_mm=456
 +#### A masked pattern was here ####
 +PREHOOK: query: drop table part_mm
 +PREHOOK: type: DROPTABLE
 +PREHOOK: Input: default@part_mm
 +PREHOOK: Output: default@part_mm
 +POSTHOOK: query: drop table part_mm
 +POSTHOOK: type: DROPTABLE
 +POSTHOOK: Input: default@part_mm
 +POSTHOOK: Output: default@part_mm
 +PREHOOK: query: drop table simple_mm
 +PREHOOK: type: DROPTABLE
 +POSTHOOK: query: drop table simple_mm
 +POSTHOOK: type: DROPTABLE
 +PREHOOK: query: create table simple_mm(key int) stored as orc tblproperties ("transactional"="true", "transactional_properties"="insert_only")
 +PREHOOK: type: CREATETABLE
 +PREHOOK: Output: database:default
 +PREHOOK: Output: default@simple_mm
 +POSTHOOK: query: create table simple_mm(key int) stored as orc tblproperties ("transactional"="true", "transactional_properties"="insert_only")
 +POSTHOOK: type: CREATETABLE
 +POSTHOOK: Output: database:default
 +POSTHOOK: Output: default@simple_mm
 +PREHOOK: query: insert into table simple_mm select key from intermediate
 +PREHOOK: type: QUERY
 +PREHOOK: Input: default@intermediate
 +PREHOOK: Input: default@intermediate@p=455
 +PREHOOK: Input: default@intermediate@p=456
 +PREHOOK: Input: default@intermediate@p=457
 +PREHOOK: Output: default@simple_mm
 +POSTHOOK: query: insert into table simple_mm select key from intermediate
 +POSTHOOK: type: QUERY
 +POSTHOOK: Input: default@intermediate
 +POSTHOOK: Input: default@intermediate@p=455
 +POSTHOOK: Input: default@intermediate@p=456
 +POSTHOOK: Input: default@intermediate@p=457
 +POSTHOOK: Output: default@simple_mm
 +POSTHOOK: Lineage: simple_mm.key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +PREHOOK: query: insert overwrite table simple_mm select key from intermediate
 +PREHOOK: type: QUERY
 +PREHOOK: Input: default@intermediate
 +PREHOOK: Input: default@intermediate@p=455
 +PREHOOK: Input: default@intermediate@p=456
 +PREHOOK: Input: default@intermediate@p=457
 +PREHOOK: Output: default@simple_mm
 +POSTHOOK: query: insert overwrite table simple_mm select key from intermediate
 +POSTHOOK: type: QUERY
 +POSTHOOK: Input: default@intermediate
 +POSTHOOK: Input: default@intermediate@p=455
 +POSTHOOK: Input: default@intermediate@p=456
 +POSTHOOK: Input: default@intermediate@p=457
 +POSTHOOK: Output: default@simple_mm
 +POSTHOOK: Lineage: simple_mm.key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +PREHOOK: query: select * from simple_mm order by key
 +PREHOOK: type: QUERY
 +PREHOOK: Input: default@simple_mm
 +#### A masked pattern was here ####
 +POSTHOOK: query: select * from simple_mm order by key
 +POSTHOOK: type: QUERY
 +POSTHOOK: Input: default@simple_mm
 +#### A masked pattern was here ####
 +0
 +10
 +97
 +98
 +100
 +103
 +PREHOOK: query: insert into table simple_mm select key from intermediate
 +PREHOOK: type: QUERY
 +PREHOOK: Input: default@intermediate
 +PREHOOK: Input: default@intermediate@p=455
 +PREHOOK: Input: default@intermediate@p=456
 +PREHOOK: Input: default@intermediate@p=457
 +PREHOOK: Output: default@simple_mm
 +POSTHOOK: query: insert into table simple_mm select key from intermediate
 +POSTHOOK: type: QUERY
 +POSTHOOK: Input: default@intermediate
 +POSTHOOK: Input: default@intermediate@p=455
 +POSTHOOK: Input: default@intermediate@p=456
 +POSTHOOK: Input: default@intermediate@p=457
 +POSTHOOK: Output: default@simple_mm
 +POSTHOOK: Lineage: simple_mm.key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +PREHOOK: query: select * from simple_mm order by key
 +PREHOOK: type: QUERY
 +PREHOOK: Input: default@simple_mm
 +#### A masked pattern was here ####
 +POSTHOOK: query: select * from simple_mm order by key
 +POSTHOOK: type: QUERY
 +POSTHOOK: Input: default@simple_mm
 +#### A masked pattern was here ####
 +0
 +0
 +10
 +10
 +97
 +97
 +98
 +98
 +100
 +100
 +103
 +103
 +PREHOOK: query: truncate table simple_mm
 +PREHOOK: type: TRUNCATETABLE
 +PREHOOK: Output: default@simple_mm
 +POSTHOOK: query: truncate table simple_mm
 +POSTHOOK: type: TRUNCATETABLE
 +POSTHOOK: Output: default@simple_mm
 +PREHOOK: query: select * from simple_mm
 +PREHOOK: type: QUERY
 +PREHOOK: Input: default@simple_mm
 +#### A masked pattern was here ####
 +POSTHOOK: query: select * from simple_mm
 +POSTHOOK: type: QUERY
 +POSTHOOK: Input: default@simple_mm
 +#### A masked pattern was here ####
 +PREHOOK: query: drop table simple_mm
 +PREHOOK: type: DROPTABLE
 +PREHOOK: Input: default@simple_mm
 +PREHOOK: Output: default@simple_mm
 +POSTHOOK: query: drop table simple_mm
 +POSTHOOK: type: DROPTABLE
 +POSTHOOK: Input: default@simple_mm
 +POSTHOOK: Output: default@simple_mm
 +PREHOOK: query: drop table dp_mm
 +PREHOOK: type: DROPTABLE
 +POSTHOOK: query: drop table dp_mm
 +POSTHOOK: type: DROPTABLE
 +PREHOOK: query: create table dp_mm (key int) partitioned by (key1 string, key2 int) stored as orc
 +  tblproperties ("transactional"="true", "transactional_properties"="insert_only")
 +PREHOOK: type: CREATETABLE
 +PREHOOK: Output: database:default
 +PREHOOK: Output: default@dp_mm
 +POSTHOOK: query: create table dp_mm (key int) partitioned by (key1 string, key2 int) stored as orc
 +  tblproperties ("transactional"="true", "transactional_properties"="insert_only")
 +POSTHOOK: type: CREATETABLE
 +POSTHOOK: Output: database:default
 +POSTHOOK: Output: default@dp_mm
 +PREHOOK: query: insert into table dp_mm partition (key1='123', key2) select key, key from intermediate
 +PREHOOK: type: QUERY
 +PREHOOK: Input: default@intermediate
 +PREHOOK: Input: default@intermediate@p=455
 +PREHOOK: Input: default@intermediate@p=456
 +PREHOOK: Input: default@intermediate@p=457
 +PREHOOK: Output: default@dp_mm@key1=123
 +POSTHOOK: query: insert into table dp_mm partition (key1='123', key2) select key, key from intermediate
 +POSTHOOK: type: QUERY
 +POSTHOOK: Input: default@intermediate
 +POSTHOOK: Input: default@intermediate@p=455
 +POSTHOOK: Input: default@intermediate@p=456
 +POSTHOOK: Input: default@intermediate@p=457
 +POSTHOOK: Output: default@dp_mm@key1=123/key2=0
 +POSTHOOK: Output: default@dp_mm@key1=123/key2=10
 +POSTHOOK: Output: default@dp_mm@key1=123/key2=100
 +POSTHOOK: Output: default@dp_mm@key1=123/key2=103
 +POSTHOOK: Output: default@dp_mm@key1=123/key2=97
 +POSTHOOK: Output: default@dp_mm@key1=123/key2=98
 +POSTHOOK: Lineage: dp_mm PARTITION(key1=123,key2=0).key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +POSTHOOK: Lineage: dp_mm PARTITION(key1=123,key2=100).key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +POSTHOOK: Lineage: dp_mm PARTITION(key1=123,key2=103).key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +POSTHOOK: Lineage: dp_mm PARTITION(key1=123,key2=10).key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +POSTHOOK: Lineage: dp_mm PARTITION(key1=123,key2=97).key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +POSTHOOK: Lineage: dp_mm PARTITION(key1=123,key2=98).key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +PREHOOK: query: select * from dp_mm order by key
 +PREHOOK: type: QUERY
 +PREHOOK: Input: default@dp_mm
 +PREHOOK: Input: default@dp_mm@key1=123/key2=0
 +PREHOOK: Input: default@dp_mm@key1=123/key2=10
 +PREHOOK: Input: default@dp_mm@key1=123/key2=100
 +PREHOOK: Input: default@dp_mm@key1=123/key2=103
 +PREHOOK: Input: default@dp_mm@key1=123/key2=97
 +PREHOOK: Input: default@dp_mm@key1=123/key2=98
 +#### A masked pattern was here ####
 +POSTHOOK: query: select * from dp_mm order by key
 +POSTHOOK: type: QUERY
 +POSTHOOK: Input: default@dp_mm
 +POSTHOOK: Input: default@dp_mm@key1=123/key2=0
 +POSTHOOK: Input: default@dp_mm@key1=123/key2=10
 +POSTHOOK: Input: default@dp_mm@key1=123/key2=100
 +POSTHOOK: Input: default@dp_mm@key1=123/key2=103
 +POSTHOOK: Input: default@dp_mm@key1=123/key2=97
 +POSTHOOK: Input: default@dp_mm@key1=123/key2=98
 +#### A masked pattern was here ####
 +0	123	0
 +10	123	10
 +97	123	97
 +98	123	98
 +100	123	100
 +103	123	103
 +PREHOOK: query: drop table dp_mm
 +PREHOOK: type: DROPTABLE
 +PREHOOK: Input: default@dp_mm
 +PREHOOK: Output: default@dp_mm
 +POSTHOOK: query: drop table dp_mm
 +POSTHOOK: type: DROPTABLE
 +POSTHOOK: Input: default@dp_mm
 +POSTHOOK: Output: default@dp_mm
 +PREHOOK: query: create table union_mm(id int)  tblproperties ("transactional"="true", "transactional_properties"="insert_only")
 +PREHOOK: type: CREATETABLE
 +PREHOOK: Output: database:default
 +PREHOOK: Output: default@union_mm
 +POSTHOOK: query: create table union_mm(id int)  tblproperties ("transactional"="true", "transactional_properties"="insert_only")
 +POSTHOOK: type: CREATETABLE
 +POSTHOOK: Output: database:default
 +POSTHOOK: Output: default@union_mm
 +PREHOOK: query: insert into table union_mm 
 +select temps.p from (
 +select key as p from intermediate 
 +union all 
 +select key + 1 as p from intermediate ) temps
 +PREHOOK: type: QUERY
 +PREHOOK: Input: default@intermediate
 +PREHOOK: Input: default@intermediate@p=455
 +PREHOOK: Input: default@intermediate@p=456
 +PREHOOK: Input: default@intermediate@p=457
 +PREHOOK: Output: default@union_mm
 +POSTHOOK: query: insert into table union_mm 
 +select temps.p from (
 +select key as p from intermediate 
 +union all 
 +select key + 1 as p from intermediate ) temps
 +POSTHOOK: type: QUERY
 +POSTHOOK: Input: default@intermediate
 +POSTHOOK: Input: default@intermediate@p=455
 +POSTHOOK: Input: default@intermediate@p=456
 +POSTHOOK: Input: default@intermediate@p=457
 +POSTHOOK: Output: default@union_mm
 +POSTHOOK: Lineage: union_mm.id EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +PREHOOK: query: select * from union_mm order by id
 +PREHOOK: type: QUERY
 +PREHOOK: Input: default@union_mm
 +#### A masked pattern was here ####
 +POSTHOOK: query: select * from union_mm order by id
 +POSTHOOK: type: QUERY
 +POSTHOOK: Input: default@union_mm
 +#### A masked pattern was here ####
 +0
 +1
 +10
 +11
 +97
 +98
 +98
 +99
 +100
 +101
 +103
 +104
 +PREHOOK: query: insert into table union_mm 
 +select p from
 +(
 +select key + 1 as p from intermediate
 +union all
 +select key from intermediate
 +) tab group by p
 +union all
 +select key + 2 as p from intermediate
 +PREHOOK: type: QUERY
 +PREHOOK: Input: default@intermediate
 +PREHOOK: Input: default@intermediate@p=455
 +PREHOOK: Input: default@intermediate@p=456
 +PREHOOK: Input: default@intermediate@p=457
 +PREHOOK: Output: default@union_mm
 +POSTHOOK: query: insert into table union_mm 
 +select p from
 +(
 +select key + 1 as p from intermediate
 +union all
 +select key from intermediate
 +) tab group by p
 +union all
 +select key + 2 as p from intermediate
 +POSTHOOK: type: QUERY
 +POSTHOOK: Input: default@intermediate
 +POSTHOOK: Input: default@intermediate@p=455
 +POSTHOOK: Input: default@intermediate@p=456
 +POSTHOOK: Input: default@intermediate@p=457
 +POSTHOOK: Output: default@union_mm
 +POSTHOOK: Lineage: union_mm.id EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +PREHOOK: query: select * from union_mm order by id
 +PREHOOK: type: QUERY
 +PREHOOK: Input: default@union_mm
 +#### A masked pattern was here ####
 +POSTHOOK: query: select * from union_mm order by id
 +POSTHOOK: type: QUERY
 +POSTHOOK: Input: default@union_mm
 +#### A masked pattern was here ####
 +0
 +0
 +1
 +1
 +2
 +10
 +10
 +11
 +11
 +12
 +97
 +97
 +98
 +98
 +98
 +99
 +99
 +99
 +100
 +100
 +100
 +101
 +101
 +102
 +103
 +103
 +104
 +104
 +105
 +PREHOOK: query: insert into table union_mm
 +SELECT p FROM
 +(
 +  SELECT key + 1 as p FROM intermediate
 +  UNION ALL
 +  SELECT key as p FROM ( 
 +    SELECT distinct key FROM (
 +      SELECT key FROM (
 +        SELECT key + 2 as key FROM intermediate
 +        UNION ALL
 +        SELECT key FROM intermediate
 +      )t1 
 +    group by key)t2
 +  )t3
 +)t4
 +group by p
 +PREHOOK: type: QUERY
 +PREHOOK: Input: default@intermediate
 +PREHOOK: Input: default@intermediate@p=455
 +PREHOOK: Input: default@intermediate@p=456
 +PREHOOK: Input: default@intermediate@p=457
 +PREHOOK: Output: default@union_mm
 +POSTHOOK: query: insert into table union_mm
 +SELECT p FROM
 +(
 +  SELECT key + 1 as p FROM intermediate
 +  UNION ALL
 +  SELECT key as p FROM ( 
 +    SELECT distinct key FROM (
 +      SELECT key FROM (
 +        SELECT key + 2 as key FROM intermediate
 +        UNION ALL
 +        SELECT key FROM intermediate
 +      )t1 
 +    group by key)t2
 +  )t3
 +)t4
 +group by p
 +POSTHOOK: type: QUERY
 +POSTHOOK: Input: default@intermediate
 +POSTHOOK: Input: default@intermediate@p=455
 +POSTHOOK: Input: default@intermediate@p=456
 +POSTHOOK: Input: default@intermediate@p=457
 +POSTHOOK: Output: default@union_mm
 +POSTHOOK: Lineage: union_mm.id EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +PREHOOK: query: select * from union_mm order by id
 +PREHOOK: type: QUERY
 +PREHOOK: Input: default@union_mm
 +#### A masked pattern was here ####
 +POSTHOOK: query: select * from union_mm order by id
 +POSTHOOK: type: QUERY
 +POSTHOOK: Input: default@union_mm
 +#### A masked pattern was here ####
 +0
 +0
 +0
 +1
 +1
 +1
 +2
 +2
 +10
 +10
 +10
 +11
 +11
 +11
 +12
 +12
 +97
 +97
 +97
 +98
 +98
 +98
 +98
 +99
 +99
 +99
 +99
 +100
 +100
 +100
 +100
 +101
 +101
 +101
 +102
 +102
 +103
 +103
 +103
 +104
 +104
 +104
 +105
 +105
 +PREHOOK: query: drop table union_mm
 +PREHOOK: type: DROPTABLE
 +PREHOOK: Input: default@union_mm
 +PREHOOK: Output: default@union_mm
 +POSTHOOK: query: drop table union_mm
 +POSTHOOK: type: DROPTABLE
 +POSTHOOK: Input: default@union_mm
 +POSTHOOK: Output: default@union_mm
 +PREHOOK: query: create table partunion_mm(id int) partitioned by (key int) tblproperties ("transactional"="true", "transactional_properties"="insert_only")
 +PREHOOK: type: CREATETABLE
 +PREHOOK: Output: database:default
 +PREHOOK: Output: default@partunion_mm
 +POSTHOOK: query: create table partunion_mm(id int) partitioned by (key int) tblproperties ("transactional"="true", "transactional_properties"="insert_only")
 +POSTHOOK: type: CREATETABLE
 +POSTHOOK: Output: database:default
 +POSTHOOK: Output: default@partunion_mm
 +PREHOOK: query: insert into table partunion_mm partition(key)
 +select temps.* from (
 +select key as p, key from intermediate 
 +union all 
 +select key + 1 as p, key + 1 from intermediate ) temps
 +PREHOOK: type: QUERY
 +PREHOOK: Input: default@intermediate
 +PREHOOK: Input: default@intermediate@p=455
 +PREHOOK: Input: default@intermediate@p=456
 +PREHOOK: Input: default@intermediate@p=457
 +PREHOOK: Output: default@partunion_mm
 +POSTHOOK: query: insert into table partunion_mm partition(key)
 +select temps.* from (
 +select key as p, key from intermediate 
 +union all 
 +select key + 1 as p, key + 1 from intermediate ) temps
 +POSTHOOK: type: QUERY
 +POSTHOOK: Input: default@intermediate
 +POSTHOOK: Input: default@intermediate@p=455
 +POSTHOOK: Input: default@intermediate@p=456
 +POSTHOOK: Input: default@intermediate@p=457
 +POSTHOOK: Output: default@partunion_mm@key=0
 +POSTHOOK: Output: default@partunion_mm@key=1
 +POSTHOOK: Output: default@partunion_mm@key=10
 +POSTHOOK: Output: default@partunion_mm@key=100
 +POSTHOOK: Output: default@partunion_mm@key=101
 +POSTHOOK: Output: default@partunion_mm@key=103
 +POSTHOOK: Output: default@partunion_mm@key=104
 +POSTHOOK: Output: default@partunion_mm@key=11
 +POSTHOOK: Output: default@partunion_mm@key=97
 +POSTHOOK: Output: default@partunion_mm@key=98
 +POSTHOOK: Output: default@partunion_mm@key=99
 +POSTHOOK: Lineage: partunion_mm PARTITION(key=0).id EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +POSTHOOK: Lineage: partunion_mm PARTITION(key=100).id EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +POSTHOOK: Lineage: partunion_mm PARTITION(key=101).id EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +POSTHOOK: Lineage: partunion_mm PARTITION(key=103).id EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +POSTHOOK: Lineage: partunion_mm PARTITION(key=104).id EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +POSTHOOK: Lineage: partunion_mm PARTITION(key=10).id EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +POSTHOOK: Lineage: partunion_mm PARTITION(key=11).id EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +POSTHOOK: Lineage: partunion_mm PARTITION(key=1).id EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +POSTHOOK: Lineage: partunion_mm PARTITION(key=97).id EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +POSTHOOK: Lineage: partunion_mm PARTITION(key=98).id EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +POSTHOOK: Lineage: partunion_mm PARTITION(key=99).id EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +PREHOOK: query: select * from partunion_mm order by id
 +PREHOOK: type: QUERY
 +PREHOOK: Input: default@partunion_mm
 +PREHOOK: Input: default@partunion_mm@key=0
 +PREHOOK: Input: default@partunion_mm@key=1
 +PREHOOK: Input: default@partunion_mm@key=10
 +PREHOOK: Input: default@partunion_mm@key=100
 +PREHOOK: Input: default@partunion_mm@key=101
 +PREHOOK: Input: default@partunion_mm@key=103
 +PREHOOK: Input: default@partunion_mm@key=104
 +PREHOOK: Input: default@partunion_mm@key=11
 +PREHOOK: Input: default@partunion_mm@key=97
 +PREHOOK: Input: default@partunion_mm@key=98
 +PREHOOK: Input: default@partunion_mm@key=99
 +#### A masked pattern was here ####
 +POSTHOOK: query: select * from partunion_mm order by id
 +POSTHOOK: type: QUERY
 +POSTHOOK: Input: default@partunion_mm
 +POSTHOOK: Input: default@partunion_mm@key=0
 +POSTHOOK: Input: default@partunion_mm@key=1
 +POSTHOOK: Input: default@partunion_mm@key=10
 +POSTHOOK: Input: default@partunion_mm@key=100
 +POSTHOOK: Input: default@partunion_mm@key=101
 +POSTHOOK: Input: default@partunion_mm@key=103
 +POSTHOOK: Input: default@partunion_mm@key=104
 +POSTHOOK: Input: default@partunion_mm@key=11
 +POSTHOOK: Input: default@partunion_mm@key=97
 +POSTHOOK: Input: default@partunion_mm@key=98
 +POSTHOOK: Input: default@partunion_mm@key=99
 +#### A masked pattern was here ####
 +0	0
 +1	1
 +10	10
 +11	11
 +97	97
 +98	98
 +98	98
 +99	99
 +100	100
 +101	101
 +103	103
 +104	104
 +PREHOOK: query: drop table partunion_mm
 +PREHOOK: type: DROPTABLE
 +PREHOOK: Input: default@partunion_mm
 +PREHOOK: Output: default@partunion_mm
 +POSTHOOK: query: drop table partunion_mm
 +POSTHOOK: type: DROPTABLE
 +POSTHOOK: Input: default@partunion_mm
 +POSTHOOK: Output: default@partunion_mm
 +PREHOOK: query: create table skew_mm(k1 int, k2 int, k4 int) skewed by (k1, k4) on ((0,0),(1,1),(2,2),(3,3))
 + stored as directories tblproperties ("transactional"="true", "transactional_properties"="insert_only")
 +PREHOOK: type: CREATETABLE
 +PREHOOK: Output: database:default
 +PREHOOK: Output: default@skew_mm
 +POSTHOOK: query: create table skew_mm(k1 int, k2 int, k4 int) skewed by (k1, k4) on ((0,0),(1,1),(2,2),(3,3))
 + stored as directories tblproperties ("transactional"="true", "transactional_properties"="insert_only")
 +POSTHOOK: type: CREATETABLE
 +POSTHOOK: Output: database:default
 +POSTHOOK: Output: default@skew_mm
 +PREHOOK: query: insert into table skew_mm 
 +select key, key, key from intermediate
 +PREHOOK: type: QUERY
 +PREHOOK: Input: default@intermediate
 +PREHOOK: Input: default@intermediate@p=455
 +PREHOOK: Input: default@intermediate@p=456
 +PREHOOK: Input: default@intermediate@p=457
 +PREHOOK: Output: default@skew_mm
 +POSTHOOK: query: insert into table skew_mm 
 +select key, key, key from intermediate
 +POSTHOOK: type: QUERY
 +POSTHOOK: Input: default@intermediate
 +POSTHOOK: Input: default@intermediate@p=455
 +POSTHOOK: Input: default@intermediate@p=456
 +POSTHOOK: Input: default@intermediate@p=457
 +POSTHOOK: Output: default@skew_mm
 +POSTHOOK: Lineage: skew_mm.k1 SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +POSTHOOK: Lineage: skew_mm.k2 SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +POSTHOOK: Lineage: skew_mm.k4 SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
- PREHOOK: query: select * from skew_mm order by k2
++PREHOOK: query: select * from skew_mm order by k2, k1, k4
 +PREHOOK: type: QUERY
 +PREHOOK: Input: default@skew_mm
 +#### A masked pattern was here ####
- POSTHOOK: query: select * from skew_mm order by k2
++POSTHOOK: query: select * from skew_mm order by k2, k1, k4
 +POSTHOOK: type: QUERY
 +POSTHOOK: Input: default@skew_mm
 +#### A masked pattern was here ####
 +0	0	0
 +10	10	10
 +97	97	97
 +98	98	98
 +100	100	100
 +103	103	103
 +PREHOOK: query: drop table skew_mm
 +PREHOOK: type: DROPTABLE
 +PREHOOK: Input: default@skew_mm
 +PREHOOK: Output: default@skew_mm
 +POSTHOOK: query: drop table skew_mm
 +POSTHOOK: type: DROPTABLE
 +POSTHOOK: Input: default@skew_mm
 +POSTHOOK: Output: default@skew_mm
 +PREHOOK: query: create table skew_dp_union_mm(k1 int, k2 int, k4 int) partitioned by (k3 int) 
 +skewed by (k1, k4) on ((0,0),(1,1),(2,2),(3,3)) stored as directories tblproperties ("transactional"="true", "transactional_properties"="insert_only")
 +PREHOOK: type: CREATETABLE
 +PREHOOK: Output: database:default
 +PREHOOK: Output: default@skew_dp_union_mm
 +POSTHOOK: query: create table skew_dp_union_mm(k1 int, k2 int, k4 int) partitioned by (k3 int) 
 +skewed by (k1, k4) on ((0,0),(1,1),(2,2),(3,3)) stored as directories tblproperties ("transactional"="true", "transactional_properties"="insert_only")
 +POSTHOOK: type: CREATETABLE
 +POSTHOOK: Output: database:default
 +POSTHOOK: Output: default@skew_dp_union_mm
 +PREHOOK: query: insert into table skew_dp_union_mm partition (k3)
 +select key as i, key as j, key as k, key as l from intermediate
 +union all 
 +select key +1 as i, key +2 as j, key +3 as k, key +4 as l from intermediate
 +PREHOOK: type: QUERY
 +PREHOOK: Input: default@intermediate
 +PREHOOK: Input: default@intermediate@p=455
 +PREHOOK: Input: default@intermediate@p=456
 +PREHOOK: Input: default@intermediate@p=457
 +PREHOOK: Output: default@skew_dp_union_mm
 +POSTHOOK: query: insert into table skew_dp_union_mm partition (k3)
 +select key as i, key as j, key as k, key as l from intermediate
 +union all 
 +select key +1 as i, key +2 as j, key +3 as k, key +4 as l from intermediate
 +POSTHOOK: type: QUERY
 +POSTHOOK: Input: default@intermediate
 +POSTHOOK: Input: default@intermediate@p=455
 +POSTHOOK: Input: default@intermediate@p=456
 +POSTHOOK: Input: default@intermediate@p=457
 +POSTHOOK: Output: default@skew_dp_union_mm@k3=0
 +POSTHOOK: Output: default@skew_dp_union_mm@k3=10
 +POSTHOOK: Output: default@skew_dp_union_mm@k3=100
 +POSTHOOK: Output: default@skew_dp_union_mm@k3=101
 +POSTHOOK: Output: default@skew_dp_union_mm@k3=102
 +POSTHOOK: Output: default@skew_dp_union_mm@k3=103
 +POSTHOOK: Output: default@skew_dp_union_mm@k3=104
 +POSTHOOK: Output: default@skew_dp_union_mm@k3=107
 +POSTHOOK: Output: default@skew_dp_union_mm@k3=14
 +POSTHOOK: Output: default@skew_dp_union_mm@k3=4
 +POSTHOOK: Output: default@skew_dp_union_mm@k3=97
 +POSTHOOK: Output: default@skew_dp_union_mm@k3=98
 +POSTHOOK: Lineage: skew_dp_union_mm PARTITION(k3=0).k1 EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +POSTHOOK: Lineage: skew_dp_union_mm PARTITION(k3=0).k2 EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +POSTHOOK: Lineage: skew_dp_union_mm PARTITION(k3=0).k4 EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +POSTHOOK: Lineage: skew_dp_union_mm PARTITION(k3=100).k1 EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +POSTHOOK: Lineage: skew_dp_union_mm PARTITION(k3=100).k2 EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +POSTHOOK: Lineage: skew_dp_union_mm PARTITION(k3=100).k4 EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +POSTHOOK: Lineage: skew_dp_union_mm PARTITION(k3=101).k1 EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +POSTHOOK: Lineage: skew_dp_union_mm PARTITION(k3=101).k2 EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +POSTHOOK: Lineage: skew_dp_union_mm PARTITION(k3=101).k4 EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +POSTHOOK: Lineage: skew_dp_union_mm PARTITION(k3=102).k1 EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +POSTHOOK: Lineage: skew_dp_union_mm PARTITION(k3=102).k2 EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +POSTHOOK: Lineage: skew_dp_union_mm PARTITION(k3=102).k4 EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +POSTHOOK: Lineage: skew_dp_union_mm PARTITION(k3=103).k1 EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +POSTHOOK: Lineage: skew_dp_union_mm PARTITION(k3=103).k2 EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +POSTHOOK: Lineage: skew_dp_union_mm PARTITION(k3=103).k4 EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +POSTHOOK: Lineage: skew_dp_union_mm PARTITION(k3=104).k1 EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +POSTHOOK: Lineage: skew_dp_union_mm PARTITION(k3=104).k2 EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +POSTHOOK: Lineage: skew_dp_union_mm PARTITION(k3=104).k4 EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +POSTHOOK: Lineage: skew_dp_union_mm PARTITION(k3=107).k1 EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +POSTHOOK: Lineage: skew_dp_union_mm PARTITION(k3=107).k2 EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +POSTHOOK: Lineage: skew_dp_union_mm PARTITION(k3=107).k4 EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +POSTHOOK: Lineage: skew_dp_union_mm PARTITION(k3=10).k1 EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +POSTHOOK: Lineage: skew_dp_union_mm PARTITION(k3=10).k2 EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +POSTHOOK: Lineage: skew_dp_union_mm PARTITION(k3=10).k4 EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +POSTHOOK: Lineage: skew_dp_union_mm PARTITION(k3=14).k1 EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +POSTHOOK: Lineage: skew_dp_union_mm PARTITION(k3=14).k2 EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +POSTHOOK: Lineage: skew_dp_union_mm PARTITION(k3=14).k4 EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +POSTHOOK: Lineage: skew_dp_union_mm PARTITION(k3=4).k1 EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +POSTHOOK: Lineage: skew_dp_union_mm PARTITION(k3=4).k2 EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +POSTHOOK: Lineage: skew_dp_union_mm PARTITION(k3=4).k4 EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +POSTHOOK: Lineage: skew_dp_union_mm PARTITION(k3=97).k1 EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +POSTHOOK: Lineage: skew_dp_union_mm PARTITION(k3=97).k2 EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +POSTHOOK: Lineage: skew_dp_union_mm PARTITION(k3=97).k4 EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +POSTHOOK: Lineage: skew_dp_union_mm PARTITION(k3=98).k1 EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +POSTHOOK: Lineage: skew_dp_union_mm PARTITION(k3=98).k2 EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +POSTHOOK: Lineage: skew_dp_union_mm PARTITION(k3=98).k4 EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
- PREHOOK: query: select * from skew_dp_union_mm order by k2
++PREHOOK: query: select * from skew_dp_union_mm order by k2, k1, k4
 +PREHOOK: type: QUERY
 +PREHOOK: Input: default@skew_dp_union_mm
 +PREHOOK: Input: default@skew_dp_union_mm@k3=0
 +PREHOOK: Input: default@skew_dp_union_mm@k3=10
 +PREHOOK: Input: default@skew_dp_union_mm@k3=100
 +PREHOOK: Input: default@skew_dp_union_mm@k3=101
 +PREHOOK: Input: default@skew_dp_union_mm@k3=102
 +PREHOOK: Input: default@skew_dp_union_mm@k3=103
 +PREHOOK: Input: default@skew_dp_union_mm@k3=104
 +PREHOOK: Input: default@skew_dp_union_mm@k3=107
 +PREHOOK: Input: default@skew_dp_union_mm@k3=14
 +PREHOOK: Input: default@skew_dp_union_mm@k3=4
 +PREHOOK: Input: default@skew_dp_union_mm@k3=97
 +PREHOOK: Input: default@skew_dp_union_mm@k3=98
 +#### A masked pattern was here ####
- POSTHOOK: query: select * from skew_dp_union_mm order by k2
++POSTHOOK: query: select * from skew_dp_union_mm order by k2, k1, k4
 +POSTHOOK: type: QUERY
 +POSTHOOK: Input: default@skew_dp_union_mm
 +POSTHOOK: Input: default@skew_dp_union_mm@k3=0
 +POSTHOOK: Input: default@skew_dp_union_mm@k3=10
 +POSTHOOK: Input: default@skew_dp_union_mm@k3=100
 +POSTHOOK: Input: default@skew_dp_union_mm@k3=101
 +POSTHOOK: Input: default@skew_dp_union_mm@k3=102
 +POSTHOOK: Input: default@skew_dp_union_mm@k3=103
 +POSTHOOK: Input: default@skew_dp_union_mm@k3=104
 +POSTHOOK: Input: default@skew_dp_union_mm@k3=107
 +POSTHOOK: Input: default@skew_dp_union_mm@k3=14
 +POSTHOOK: Input: default@skew_dp_union_mm@k3=4
 +POSTHOOK: Input: default@skew_dp_union_mm@k3=97
 +POSTHOOK: Input: default@skew_dp_union_mm@k3=98
 +#### A masked pattern was here ####
 +0	0	0	0
 +1	2	3	4
 +10	10	10	10
 +11	12	13	14
 +97	97	97	97
 +98	98	98	98
 +98	99	100	101
 +99	100	101	102
 +100	100	100	100
 +101	102	103	104
 +103	103	103	103
 +104	105	106	107
 +PREHOOK: query: drop table skew_dp_union_mm
 +PREHOOK: type: DROPTABLE
 +PREHOOK: Input: default@skew_dp_union_mm
 +PREHOOK: Output: default@skew_dp_union_mm
 +POSTHOOK: query: drop table skew_dp_union_mm
 +POSTHOOK: type: DROPTABLE
 +POSTHOOK: Input: default@skew_dp_union_mm
 +POSTHOOK: Output: default@skew_dp_union_mm
 +PREHOOK: query: create table merge0_mm (id int) stored as orc tblproperties("transactional"="true", "transactional_properties"="insert_only")
 +PREHOOK: type: CREATETABLE
 +PREHOOK: Output: database:default
 +PREHOOK: Output: default@merge0_mm
 +POSTHOOK: query: create table merge0_mm (id int) stored as orc tblproperties("transactional"="true", "transactional_properties"="insert_only")
 +POSTHOOK: type: CREATETABLE
 +POSTHOOK: Output: database:default
 +POSTHOOK: Output: default@merge0_mm
 +PREHOOK: query: insert into table merge0_mm select key from intermediate
 +PREHOOK: type: QUERY
 +PREHOOK: Input: default@intermediate
 +PREHOOK: Input: default@intermediate@p=455
 +PREHOOK: Input: default@intermediate@p=456
 +PREHOOK: Input: default@intermediate@p=457
 +PREHOOK: Output: default@merge0_mm
 +POSTHOOK: query: insert into table merge0_mm select key from intermediate
 +POSTHOOK: type: QUERY
 +POSTHOOK: Input: default@intermediate
 +POSTHOOK: Input: default@intermediate@p=455
 +POSTHOOK: Input: default@intermediate@p=456
 +POSTHOOK: Input: default@intermediate@p=457
 +POSTHOOK: Output: default@merge0_mm
 +POSTHOOK: Lineage: merge0_mm.id SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +PREHOOK: query: select * from merge0_mm
 +PREHOOK: type: QUERY
 +PREHOOK: Input: default@merge0_mm
 +#### A masked pattern was here ####
 +POSTHOOK: query: select * from merge0_mm
 +POSTHOOK: type: QUERY
 +POSTHOOK: Input: default@merge0_mm
 +#### A masked pattern was here ####
 +98
 +97
 +100
 +103
 +0
 +10
 +PREHOOK: query: insert into table merge0_mm select key from intermediate
 +PREHOOK: type: QUERY
 +PREHOOK: Input: default@intermediate
 +PREHOOK: Input: default@intermediate@p=455
 +PREHOOK: Input: default@intermediate@p=456
 +PREHOOK: Input: default@intermediate@p=457
 +PREHOOK: Output: default@merge0_mm
 +POSTHOOK: query: insert into table merge0_mm select key from intermediate
 +POSTHOOK: type: QUERY
 +POSTHOOK: Input: default@intermediate
 +POSTHOOK: Input: default@intermediate@p=455
 +POSTHOOK: Input: default@intermediate@p=456
 +POSTHOOK: Input: default@intermediate@p=457
 +POSTHOOK: Output: default@merge0_mm
 +POSTHOOK: Lineage: merge0_mm.id SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +PREHOOK: query: select * from merge0_mm
 +PREHOOK: type: QUERY
 +PREHOOK: Input: default@merge0_mm
 +#### A masked pattern was here ####
 +POSTHOOK: query: select * from merge0_mm
 +POSTHOOK: type: QUERY
 +POSTHOOK: Input: default@merge0_mm
 +#### A masked pattern was here ####
 +98
 +97
 +100
 +103
 +0
 +10
 +98
 +97
 +100
 +103
 +0
 +10
 +PREHOOK: query: drop table merge0_mm
 +PREHOOK: type: DROPTABLE
 +PREHOOK: Input: default@merge0_mm
 +PREHOOK: Output: default@merge0_mm
 +POSTHOOK: query: drop table merge0_mm
 +POSTHOOK: type: DROPTABLE
 +POSTHOOK: Input: default@merge0_mm
 +POSTHOOK: Output: default@merge0_mm
 +PREHOOK: query: create table merge2_mm (id int) tblproperties("transactional"="true", "transactional_properties"="insert_only")
 +PREHOOK: type: CREATETABLE
 +PREHOOK: Output: database:default
 +PREHOOK: Output: default@merge2_mm
 +POSTHOOK: query: create table merge2_mm (id int) tblproperties("transactional"="true", "transactional_properties"="insert_only")
 +POSTHOOK: type: CREATETABLE
 +POSTHOOK: Output: database:default
 +POSTHOOK: Output: default@merge2_mm
 +PREHOOK: query: insert into table merge2_mm select key from intermediate
 +PREHOOK: type: QUERY
 +PREHOOK: Input: default@intermediate
 +PREHOOK: Input: default@intermediate@p=455
 +PREHOOK: Input: default@intermediate@p=456
 +PREHOOK: Input: default@intermediate@p=457
 +PREHOOK: Output: default@merge2_mm
 +POSTHOOK: query: insert into table merge2_mm select key from intermediate
 +POSTHOOK: type: QUERY
 +POSTHOOK: Input: default@intermediate
 +POSTHOOK: Input: default@intermediate@p=455
 +POSTHOOK: Input: default@intermediate@p=456
 +POSTHOOK: Input: default@intermediate@p=457
 +POSTHOOK: Output: default@merge2_mm
 +POSTHOOK: Lineage: merge2_mm.id SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +PREHOOK: query: select * from merge2_mm
 +PREHOOK: type: QUERY
 +PREHOOK: Input: default@merge2_mm
 +#### A masked pattern was here ####
 +POSTHOOK: query: select * from merge2_mm
 +POSTHOOK: type: QUERY
 +POSTHOOK: Input: default@merge2_mm
 +#### A masked pattern was here ####
 +98
 +97
 +100
 +103
 +0
 +10
 +PREHOOK: query: insert into table merge2_mm select key from intermediate
 +PREHOOK: type: QUERY
 +PREHOOK: Input: default@intermediate
 +PREHOOK: Input: default@intermediate@p=455
 +PREHOOK: Input: default@intermediate@p=456
 +PREHOOK: Input: default@intermediate@p=457
 +PREHOOK: Output: default@merge2_mm
 +POSTHOOK: query: insert into table merge2_mm select key from intermediate
 +POSTHOOK: type: QUERY
 +POSTHOOK: Input: default@intermediate
 +POSTHOOK: Input: default@intermediate@p=455
 +POSTHOOK: Input: default@intermediate@p=456
 +POSTHOOK: Input: default@intermediate@p=457
 +POSTHOOK: Output: default@merge2_mm
 +POSTHOOK: Lineage: merge2_mm.id SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +PREHOOK: query: select * from merge2_mm
 +PREHOOK: type: QUERY
 +PREHOOK: Input: default@merge2_mm
 +#### A masked pattern was here ####
 +POSTHOOK: query: select * from merge2_mm
 +POSTHOOK: type: QUERY
 +POSTHOOK: Input: default@merge2_mm
 +#### A masked pattern was here ####
 +98
 +97
 +100
 +103
 +0
 +10
 +98
 +97
 +100
 +103
 +0
 +10
 +PREHOOK: query: drop table merge2_mm
 +PREHOOK: type: DROPTABLE
 +PREHOOK: Input: default@merge2_mm
 +PREHOOK: Output: default@merge2_mm
 +POSTHOOK: query: drop table merge2_mm
 +POSTHOOK: type: DROPTABLE
 +POSTHOOK: Input: default@merge2_mm
 +POSTHOOK: Output: default@merge2_mm
 +PREHOOK: query: create table merge1_mm (id int) partitioned by (key int) stored as orc tblproperties("transactional"="true", "transactional_properties"="insert_only")
 +PREHOOK: type: CREATETABLE
 +PREHOOK: Output: database:default
 +PREHOOK: Output: default@merge1_mm
 +POSTHOOK: query: create table merge1_mm (id int) partitioned by (key int) stored as orc tblproperties("transactional"="true", "transactional_properties"="insert_only")
 +POSTHOOK: type: CREATETABLE
 +POSTHOOK: Output: database:default
 +POSTHOOK: Output: default@merge1_mm
 +PREHOOK: query: insert into table merge1_mm partition (key) select key, key from intermediate
 +PREHOOK: type: QUERY
 +PREHOOK: Input: default@intermediate
 +PREHOOK: Input: default@intermediate@p=455
 +PREHOOK: Input: default@intermediate@p=456
 +PREHOOK: Input: default@intermediate@p=457
 +PREHOOK: Output: default@merge1_mm
 +POSTHOOK: query: insert into table merge1_mm partition (key) select key, key from intermediate
 +POSTHOOK: type: QUERY
 +POSTHOOK: Input: default@intermediate
 +POSTHOOK: Input: default@intermediate@p=455
 +POSTHOOK: Input: default@intermediate@p=456
 +POSTHOOK: Input: default@intermediate@p=457
 +POSTHOOK: Output: default@merge1_mm@key=0
 +POSTHOOK: Output: default@merge1_mm@key=10
 +POSTHOOK: Output: default@merge1_mm@key=100
 +POSTHOOK: Output: default@merge1_mm@key=103
 +POSTHOOK: Output: default@merge1_mm@key=97
 +POSTHOOK: Output: default@merge1_mm@key=98
 +POSTHOOK: Lineage: merge1_mm PARTITION(key=0).id SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +POSTHOOK: Lineage: merge1_mm PARTITION(key=100).id SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +POSTHOOK: Lineage: merge1_mm PARTITION(key=103).id SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +POSTHOOK: Lineage: merge1_mm PARTITION(key=10).id SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +POSTHOOK: Lineage: merge1_mm PARTITION(key=97).id SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +POSTHOOK: Lineage: merge1_mm PARTITION(key=98).id SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
- PREHOOK: query: select * from merge1_mm
++PREHOOK: query: select * from merge1_mm order by id, key
 +PREHOOK: type: QUERY
 +PREHOOK: Input: default@merge1_mm
 +PREHOOK: Input: default@merge1_mm@key=0
 +PREHOOK: Input: default@merge1_mm@key=10
 +PREHOOK: Input: default@merge1_mm@key=100
 +PREHOOK: Input: default@merge1_mm@key=103
 +PREHOOK: Input: default@merge1_mm@key=97
 +PREHOOK: Input: default@merge1_mm@key=98
 +#### A masked pattern was here ####
- POSTHOOK: query: select * from merge1_mm
++POSTHOOK: query: select * from merge1_mm order by id, key
 +POSTHOOK: type: QUERY
 +POSTHOOK: Input: default@merge1_mm
 +POSTHOOK: Input: default@merge1_mm@key=0
 +POSTHOOK: Input: default@merge1_mm@key=10
 +POSTHOOK: Input: default@merge1_mm@key=100
 +POSTHOOK: Input: default@merge1_mm@key=103
 +POSTHOOK: Input: default@merge1_mm@key=97
 +POSTHOOK: Input: default@merge1_mm@key=98
 +#### A masked pattern was here ####
- 100	100
- 103	103
- 97	97
- 98	98
 +0	0
 +10	10
++97	97
++98	98
++100	100
++103	103
 +PREHOOK: query: insert into table merge1_mm partition (key) select key, key from intermediate
 +PREHOOK: type: QUERY
 +PREHOOK: Input: default@intermediate
 +PREHOOK: Input: default@intermediate@p=455
 +PREHOOK: Input: default@intermediate@p=456
 +PREHOOK: Input: default@intermediate@p=457
 +PREHOOK: Output: default@merge1_mm
 +POSTHOOK: query: insert into table merge1_mm partition (key) select key, key from intermediate
 +POSTHOOK: type: QUERY
 +POSTHOOK: Input: default@intermediate
 +POSTHOOK: Input: default@intermediate@p=455
 +POSTHOOK: Input: default@intermediate@p=456
 +POSTHOOK: Input: default@intermediate@p=457
 +POSTHOOK: Output: default@merge1_mm@key=0
 +POSTHOOK: Output: default@merge1_mm@key=10
 +POSTHOOK: Output: default@merge1_mm@key=100
 +POSTHOOK: Output: default@merge1_mm@key=103
 +POSTHOOK: Output: default@merge1_mm@key=97
 +POSTHOOK: Output: default@merge1_mm@key=98
 +POSTHOOK: Lineage: merge1_mm PARTITION(key=0).id SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +POSTHOOK: Lineage: merge1_mm PARTITION(key=100).id SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +POSTHOOK: Lineage: merge1_mm PARTITION(key=103).id SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +POSTHOOK: Lineage: merge1_mm PARTITION(key=10).id SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +POSTHOOK: Lineage: merge1_mm PARTITION(key=97).id SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +POSTHOOK: Lineage: merge1_mm PARTITION(key=98).id SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
- PREHOOK: query: select * from merge1_mm
++PREHOOK: query: select * from merge1_mm order by id, key
 +PREHOOK: type: QUERY
 +PREHOOK: Input: default@merge1_mm
 +PREHOOK: Input: default@merge1_mm@key=0
 +PREHOOK: Input: default@merge1_mm@key=10
 +PREHOOK: Input: default@merge1_mm@key=100
 +PREHOOK: Input: default@merge1_mm@key=103
 +PREHOOK: Input: default@merge1_mm@key=97
 +PREHOOK: Input: default@merge1_mm@key=98
 +#### A masked pattern was here ####
- POSTHOOK: query: select * from merge1_mm
++POSTHOOK: query: select * from merge1_mm order by id, key
 +POSTHOOK: type: QUERY
 +POSTHOOK: Input: default@merge1_mm
 +POSTHOOK: Input: default@merge1_mm@key=0
 +POSTHOOK: Input: default@merge1_mm@key=10
 +POSTHOOK: Input: default@merge1_mm@key=100
 +POSTHOOK: Input: default@merge1_mm@key=103
 +POSTHOOK: Input: default@merge1_mm@key=97
 +POSTHOOK: Input: default@merge1_mm@key=98
 +#### A masked pattern was here ####
- 100	100
- 100	100
- 103	103
- 103	103
- 97	97
- 97	97
- 98	98
- 98	98
 +0	0
 +0	0
 +10	10
 +10	10
++97	97
++97	97
++98	98
++98	98
++100	100
++100	100
++103	103
++103	103
 +PREHOOK: query: drop table merge1_mm
 +PREHOOK: type: DROPTABLE
 +PREHOOK: Input: default@merge1_mm
 +PREHOOK: Output: default@merge1_mm
 +POSTHOOK: query: drop table merge1_mm
 +POSTHOOK: type: DROPTABLE
 +POSTHOOK: Input: default@merge1_mm
 +POSTHOOK: Output: default@merge1_mm
 +PREHOOK: query: drop table ctas0_mm
 +PREHOOK: type: DROPTABLE
 +POSTHOOK: query: drop table ctas0_mm
 +POSTHOOK: type: DROPTABLE
 +PREHOOK: query: create table ctas0_mm tblproperties ("transactional"="true", "transactional_properties"="insert_only") as select * from intermediate
 +PREHOOK: type: CREATETABLE_AS_SELECT
 +PREHOOK: Input: default@intermediate
 +PREHOOK: Input: default@intermediate@p=455
 +PREHOOK: Input: default@intermediate@p=456
 +PREHOOK: Input: default@intermediate@p=457
 +PREHOOK: Output: database:default
 +PREHOOK: Output: default@ctas0_mm
 +POSTHOOK: query: create table ctas0_mm tblproperties ("transactional"="true", "transactional_properties"="insert_only") as select * from intermediate
 +POSTHOOK: type: CREATETABLE_AS_SELECT
 +POSTHOOK: Input: default@intermediate
 +POSTHOOK: Input: default@intermediate@p=455
 +POSTHOOK: Input: default@intermediate@p=456
 +POSTHOOK: Input: default@intermediate@p=457
 +POSTHOOK: Output: database:default
 +POSTHOOK: Output: default@ctas0_mm
 +POSTHOOK: Lineage: ctas0_mm.key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +POSTHOOK: Lineage: ctas0_mm.p SIMPLE [(intermediate)intermediate.FieldSchema(name:p, type:int, comment:null), ]
 +PREHOOK: query: select * from ctas0_mm
 +PREHOOK: type: QUERY
 +PREHOOK: Input: default@ctas0_mm
 +#### A masked pattern was here ####
 +POSTHOOK: query: select * from ctas0_mm
 +POSTHOOK: type: QUERY
 +POSTHOOK: Input: default@ctas0_mm
 +#### A masked pattern was here ####
 +98	455
 +97	455
 +100	457
 +103	457
 +0	456
 +10	456
 +PREHOOK: query: drop table ctas0_mm
 +PREHOOK: type: DROPTABLE
 +PREHOOK: Input: default@ctas0_mm
 +PREHOOK: Output: default@ctas0_mm
 +POSTHOOK: query: drop table ctas0_mm
 +POSTHOOK: type: DROPTABLE
 +POSTHOOK: Input: default@ctas0_mm
 +POSTHOOK: Output: default@ctas0_mm
 +PREHOOK: query: drop table ctas1_mm
 +PREHOOK: type: DROPTABLE
 +POSTHOOK: query: drop table ctas1_mm
 +POSTHOOK: type: DROPTABLE
 +PREHOOK: query: create table ctas1_mm tblproperties ("transactional"="true", "transactional_properties"="insert_only") as
 +  select * from intermediate union all select * from intermediate
 +PREHOOK: type: CREATETABLE_AS_SELECT
 +PREHOOK: Input: default@intermediate
 +PREHOOK: Input: default@intermediate@p=455
 +PREHOOK: Input: default@intermediate@p=456
 +PREHOOK: Input: default@intermediate@p=457
 +PREHOOK: Output: database:default
 +PREHOOK: Output: default@ctas1_mm
 +POSTHOOK: query: create table ctas1_mm tblproperties ("transactional"="true", "transactional_properties"="insert_only") as
 +  select * from intermediate union all select * from intermediate
 +POSTHOOK: type: CREATETABLE_AS_SELECT
 +POSTHOOK: Input: default@intermediate
 +POSTHOOK: Input: default@intermediate@p=455
 +POSTHOOK: Input: default@intermediate@p=456
 +POSTHOOK: Input: default@intermediate@p=457
 +POSTHOOK: Output: database:default
 +POSTHOOK: Output: default@ctas1_mm
 +POSTHOOK: Lineage: ctas1_mm.key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +POSTHOOK: Lineage: ctas1_mm.p EXPRESSION [(intermediate)intermediate.FieldSchema(name:p, type:int, comment:null), ]
 +PREHOOK: query: select * from ctas1_mm
 +PREHOOK: type: QUERY
 +PREHOOK: Input: default@ctas1_mm
 +#### A masked pattern was here ####
 +POSTHOOK: query: select * from ctas1_mm
 +POSTHOOK: type: QUERY
 +POSTHOOK: Input: default@ctas1_mm
 +#### A masked pattern was here ####
 +98	455
 +97	455
 +100	457
 +103	457
 +0	456
 +10	456
 +98	455
 +97	455
 +100	457
 +103	457
 +0	456
 +10	456
 +PREHOOK: query: drop table ctas1_mm
 +PREHOOK: type: DROPTABLE
 +PREHOOK: Input: default@ctas1_mm
 +PREHOOK: Output: default@ctas1_mm
 +POSTHOOK: query: drop table ctas1_mm
 +POSTHOOK: type: DROPTABLE
 +POSTHOOK: Input: default@ctas1_mm
 +POSTHOOK: Output: default@ctas1_mm
 +PREHOOK: query: drop table iow0_mm
 +PREHOOK: type: DROPTABLE
 +POSTHOOK: query: drop table iow0_mm
 +POSTHOOK: type: DROPTABLE
 +PREHOOK: query: create table iow0_mm(key int) tblproperties("transactional"="true", "transactional_properties"="insert_only")
 +PREHOOK: type: CREATETABLE
 +PREHOOK: Output: database:default
 +PREHOOK: Output: default@iow0_mm
 +POSTHOOK: query: create table iow0_mm(key int) tblproperties("transactional"="true", "transactional_properties"="insert_only")
 +POSTHOOK: type: CREATETABLE
 +POSTHOOK: Output: database:default
 +POSTHOOK: Output: default@iow0_mm
 +PREHOOK: query: insert overwrite table iow0_mm select key from intermediate
 +PREHOOK: type: QUERY
 +PREHOOK: Input: default@intermediate
 +PREHOOK: Input: default@intermediate@p=455
 +PREHOOK: Input: default@intermediate@p=456
 +PREHOOK: Input: default@intermediate@p=457
 +PREHOOK: Output: default@iow0_mm
 +POSTHOOK: query: insert overwrite table iow0_mm select key from intermediate
 +POSTHOOK: type: QUERY
 +POSTHOOK: Input: default@intermediate
 +POSTHOOK: Input: default@intermediate@p=455
 +POSTHOOK: Input: default@intermediate@p=456
 +POSTHOOK: Input: default@intermediate@p=457
 +POSTHOOK: Output: default@iow0_mm
 +POSTHOOK: Lineage: iow0_mm.key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +PREHOOK: query: insert into table iow0_mm select key + 1 from intermediate
 +PREHOOK: type: QUERY
 +PREHOOK: Input: default@intermediate
 +PREHOOK: Input: default@intermediate@p=455
 +PREHOOK: Input: default@intermediate@p=456
 +PREHOOK: Input: default@intermediate@p=457
 +PREHOOK: Output: default@iow0_mm
 +POSTHOOK: query: insert into table iow0_mm select key + 1 from intermediate
 +POSTHOOK: type: QUERY
 +POSTHOOK: Input: default@intermediate
 +POSTHOOK: Input: default@intermediate@p=455
 +POSTHOOK: Input: default@intermediate@p=456
 +POSTHOOK: Input: default@intermediate@p=457
 +POSTHOOK: Output: default@iow0_mm
 +POSTHOOK: Lineage: iow0_mm.key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +PREHOOK: query: select * from iow0_mm order by key
 +PREHOOK: type: QUERY
 +PREHOOK: Input: default@iow0_mm
 +#### A masked pattern was here ####
 +POSTHOOK: query: select * from iow0_mm order by key
 +POSTHOOK: type: QUERY
 +POSTHOOK: Input: default@iow0_mm
 +#### A masked pattern was here ####
 +0
 +1
 +10
 +11
 +97
 +98
 +98
 +99
 +100
 +101
 +103
 +104
 +PREHOOK: query: insert overwrite table iow0_mm select key + 2 from intermediate
 +PREHOOK: type: QUERY
 +PREHOOK: Input: default@intermediate
 +PREHOOK: Input: default@intermediate@p=455
 +PREHOOK: Input: default@intermediate@p=456
 +PREHOOK: Input: default@intermediate@p=457
 +PREHOOK: Output: default@iow0_mm
 +POSTHOOK: query: insert overwrite table iow0_mm select key + 2 from intermediate
 +POSTHOOK: type: QUERY
 +POSTHOOK: Input: default@intermediate
 +POSTHOOK: Input: default@intermediate@p=455
 +POSTHOOK: Input: default@intermediate@p=456
 +POSTHOOK: Input: default@intermediate@p=457
 +POSTHOOK: Output: default@iow0_mm
 +POSTHOOK: Lineage: iow0_mm.key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +PREHOOK: query: select * from iow0_mm order by key
 +PREHOOK: type: QUERY
 +PREHOOK: Input: default@iow0_mm
 +#### A masked pattern was here ####
 +POSTHOOK: query: select * from iow0_mm order by key
 +POSTHOOK: type: QUERY
 +POSTHOOK: Input: default@iow0_mm
 +#### A masked pattern was here ####
 +2
 +12
 +99
 +100
 +102
 +105
 +PREHOOK: query: drop table iow0_mm
 +PREHOOK: type: DROPTABLE
 +PREHOOK: Input: default@iow0_mm
 +PREHOOK: Output: default@iow0_mm
 +POSTHOOK: query: drop table iow0_mm
 +POSTHOOK: type: DROPTABLE
 +POSTHOOK: Input: default@iow0_mm
 +POSTHOOK: Output: default@iow0_mm
 +PREHOOK: query: drop table iow1_mm
 +PREHOOK: type: DROPTABLE
 +POSTHOOK: query: drop table iow1_mm
 +POSTHOOK: type: DROPTABLE
 +PREHOOK: query: create table iow1_mm(key int) partitioned by (key2 int)  tblproperties("transactional"="true", "transactional_properties"="insert_only")
 +PREHOOK: type: CREATETABLE
 +PREHOOK: Output: database:default
 +PREHOOK: Output: default@iow1_mm
 +POSTHOOK: query: create table iow1_mm(key int) partitioned by (key2 int)  tblproperties("transactional"="true", "transactional_properties"="insert_only")
 +POSTHOOK: type: CREATETABLE
 +POSTHOOK: Output: database:default
 +POSTHOOK: Output: default@iow1_mm
 +PREHOOK: query: insert overwrite table iow1_mm partition (key2)
 +select key as k1, key from intermediate union all select key as k1, key from intermediate
 +PREHOOK: type: QUERY
 +PREHOOK: Input: default@intermediate
 +PREHOOK: Input: default@intermediate@p=455
 +PREHOOK: Input: default@intermediate@p=456
 +PREHOOK: Input: default@intermediate@p=457
 +PREHOOK: Output: default@iow1_mm
 +POSTHOOK: query: insert overwrite table iow1_mm partition (key2)
 +select key as k1, key from intermediate union all select key as k1, key from intermediate
 +POSTHOOK: type: QUERY
 +POSTHOOK: Input: default@intermediate
 +POSTHOOK: Input: default@intermediate@p=455
 +POSTHOOK: Input: default@intermediate@p=456
 +POSTHOOK: Input: default@intermediate@p=457
 +POSTHOOK: Output: default@iow1_mm@key2=0
 +POSTHOOK: Output: default@iow1_mm@key2=10
 +POSTHOOK: Output: default@iow1_mm@key2=100
 +POSTHOOK: Output: default@iow1_mm@key2=103
 +POSTHOOK: Output: default@iow1_mm@key2=97
 +POSTHOOK: Output: default@iow1_mm@key2=98
 +POSTHOOK: Lineage: iow1_mm PARTITION(key2=0).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +POSTHOOK: Lineage: iow1_mm PARTITION(key2=100).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +POSTHOOK: Lineage: iow1_mm PARTITION(key2=103).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +POSTHOOK: Lineage: iow1_mm PARTITION(key2=10).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +POSTHOOK: Lineage: iow1_mm PARTITION(key2=97).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +POSTHOOK: Lineage: iow1_mm PARTITION(key2=98).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +PREHOOK: query: insert into table iow1_mm partition (key2)
 +select key + 1 as k1, key from intermediate union all select key as k1, key from intermediate
 +PREHOOK: type: QUERY
 +PREHOOK: Input: default@intermediate
 +PREHOOK: Input: default@intermediate@p=455
 +PREHOOK: Input: default@intermediate@p=456
 +PREHOOK: Input: default@intermediate@p=457
 +PREHOOK: Output: default@iow1_mm
 +POSTHOOK: query: insert into table iow1_mm partition (key2)
 +select key + 1 as k1, key from intermediate union all select key as k1, key from intermediate
 +POSTHOOK: type: QUERY
 +POSTHOOK: Input: default@intermediate
 +POSTHOOK: Input: default@intermediate@p=455
 +POSTHOOK: Input: default@intermediate@p=456
 +POSTHOOK: Input: default@intermediate@p=457
 +POSTHOOK: Output: default@iow1_mm@key2=0
 +POSTHOOK: Output: default@iow1_mm@key2=10
 +POSTHOOK: Output: default@iow1_mm@key2=100
 +POSTHOOK: Output: default@iow1_mm@key2=103
 +POSTHOOK: Output: default@iow1_mm@key2=97
 +POSTHOOK: Output: default@iow1_mm@key2=98
 +POSTHOOK: Lineage: iow1_mm PARTITION(key2=0).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +POSTHOOK: Lineage: iow1_mm PARTITION(key2=100).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +POSTHOOK: Lineage: iow1_mm PARTITION(key2=103).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +POSTHOOK: Lineage: iow1_mm PARTITION(key2=10).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +POSTHOOK: Lineage: iow1_mm PARTITION(key2=97).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +POSTHOOK: Lineage: iow1_mm PARTITION(key2=98).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +PREHOOK: query: select * from iow1_mm order by key, key2
 +PREHOOK: type: QUERY
 +PREHOOK: Input: default@iow1_mm
 +PREHOOK: Input: default@iow1_mm@key2=0
 +PREHOOK: Input: default@iow1_mm@key2=10
 +PREHOOK: Input: default@iow1_mm@key2=100
 +PREHOOK: Input: default@iow1_mm@key2=103
 +PREHOOK: Input: default@iow1_mm@key2=97
 +PREHOOK: Input: default@iow1_mm@key2=98
 +#### A masked pattern was here ####
 +POSTHOOK: query: select * from iow1_mm order by key, key2
 +POSTHOOK: type: QUERY
 +POSTHOOK: Input: default@iow1_mm
 +POSTHOOK: Input: default@iow1_mm@key2=0
 +POSTHOOK: Input: default@iow1_mm@key2=10
 +POSTHOOK: Input: default@iow1_mm@key2=100
 +POSTHOOK: Input: default@iow1_mm@key2=103
 +POSTHOOK: Input: default@iow1_mm@key2=97
 +POSTHOOK: Input: default@iow1_mm@key2=98
 +#### A masked pattern was here ####
 +0	0
 +0	0
 +0	0
 +1	0
 +10	10
 +10	10
 +10	10
 +11	10
 +97	97
 +97	97
 +97	97
 +98	97
 +98	98
 +98	98
 +98	98
 +99	98
 +100	100
 +100	100
 +100	100
 +101	100
 +103	103
 +103	103
 +103	103
 +104	103
 +PREHOOK: query: insert overwrite table iow1_mm partition (key2)
 +select key + 3 as k1, key from intermediate union all select key + 4 as k1, key from intermediate
 +PREHOOK: type: QUERY
 +PREHOOK: Input: default@intermediate
 +PREHOOK: Input: default@intermediate@p=455
 +PREHOOK: Input: default@intermediate@p=456
 +PREHOOK: Input: default@intermediate@p=457
 +PREHOOK: Output: default@iow1_mm
 +POSTHOOK: query: insert overwrite table iow1_mm partition (key2)
 +select key + 3 as k1, key from intermediate union all select key + 4 as k1, key from intermediate
 +POSTHOOK: type: QUERY
 +POSTHOOK: Input: default@intermediate
 +POSTHOOK: Input: default@intermediate@p=455
 +POSTHOOK: Input: default@intermediate@p=456
 +POSTHOOK: Input: default@intermediate@p=457
 +POSTHOOK: Output: default@iow1_mm@key2=0
 +POSTHOOK: Output: default@iow1_mm@key2=10
 +POSTHOOK: Output: default@iow1_mm@key2=100
 +POSTHOOK: Output: default@iow1_mm@key2=103
 +POSTHOOK: Output: default@iow1_mm@key2=97
 +POSTHOOK: Output: default@iow1_mm@key2=98
 +POSTHOOK: Lineage: iow1_mm PARTITION(key2=0).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +POSTHOOK: Lineage: iow1_mm PARTITION(key2=100).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +POSTHOOK: Lineage: iow1_mm PARTITION(key2=103).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +POSTHOOK: Lineage: iow1_mm PARTITION(key2=10).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +POSTHOOK: Lineage: iow1_mm PARTITION(key2=97).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +POSTHOOK: Lineage: iow1_mm PARTITION(key2=98).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +PREHOOK: query: select * from iow1_mm order by key, key2
 +PREHOOK: type: QUERY
 +PREHOOK: Input: default@iow1_mm
 +PREHOOK: Input: default@iow1_mm@key2=0
 +PREHOOK: Input: default@iow1_mm@key2=10
 +PREHOOK: Input: default@iow1_mm@key2=100
 +PREHOOK: Input: default@iow1_mm@key2=103
 +PREHOOK: Input: default@iow1_mm@key2=97
 +PREHOOK: Input: default@iow1_mm@key2=98
 +#### A masked pattern was here ####
 +POSTHOOK: query: select * from iow1_mm order by key, key2
 +POSTHOOK: type: QUERY
 +POSTHOOK: Input: default@iow1_mm
 +POSTHOOK: Input: default@iow1_mm@key2=0
 +POSTHOOK: Input: default@iow1_mm@key2=10
 +POSTHOOK: Input: default@iow1_mm@key2=100
 +POSTHOOK: Input: default@iow1_mm@key2=103
 +POSTHOOK: Input: default@iow1_mm@key2=97
 +POSTHOOK: Input: default@iow1_mm@key2=98
 +#### A masked pattern was here ####
 +3	0
 +4	0
 +13	10
 +14	10
 +100	97
 +101	97
 +101	98
 +102	98
 +103	100
 +104	100
 +106	103
 +107	103
 +PREHOOK: query: insert overwrite table iow1_mm partition (key2)
 +select key + 3 as k1, key + 3 from intermediate union all select key + 2 as k1, key + 2 from intermediate
 +PREHOOK: type: QUERY
 +PREHOOK: Input: default@intermediate
 +PREHOOK: Input: default@intermediate@p=455
 +PREHOOK: Input: default@intermediate@p=456
 +PREHOOK: Input: default@intermediate@p=457
 +PREHOOK: Output: default@iow1_mm
 +POSTHOOK: query: insert overwrite table iow1_mm partition (key2)
 +select key + 3 as k1, key + 3 from intermediate union all select key + 2 as k1, key + 2 from intermediate
 +POSTHOOK: type: QUERY
 +POSTHOOK: Input: default@intermediate
 +POSTHOOK: Input: default@intermediate@p=455
 +POSTHOOK: Input: default@intermediate@p=456
 +POSTHOOK: Input: default@intermediate@p=457
 +POSTHOOK: Output: default@iow1_mm@key2=100
 +POSTHOOK: Output: default@iow1_mm@key2=101
 +POSTHOOK: Output: default@iow1_mm@key2=102
 +POSTHOOK: Output: default@iow1_mm@key2=103
 +POSTHOOK: Output: default@iow1_mm@key2=105
 +POSTHOOK: Output: default@iow1_mm@key2=106
 +POSTHOOK: Output: default@iow1_mm@key2=12
 +POSTHOOK: Output: default@iow1_mm@key2=13
 +POSTHOOK: Output: default@iow1_mm@key2=2
 +POSTHOOK: Output: default@iow1_mm@key2=3
 +POSTHOOK: Output: default@iow1_mm@key2=99
 +POSTHOOK: Lineage: iow1_mm PARTITION(key2=100).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +POSTHOOK: Lineage: iow1_mm PARTITION(key2=101).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +POSTHOOK: Lineage: iow1_mm PARTITION(key2=102).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +POSTHOOK: Lineage: iow1_mm PARTITION(key2=103).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +POSTHOOK: Lineage: iow1_mm PARTITION(key2=105).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +POSTHOOK: Lineage: iow1_mm PARTITION(key2=106).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +POSTHOOK: Lineage: iow1_mm PARTITION(key2=12).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +POSTHOOK: Lineage: iow1_mm PARTITION(key2=13).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +POSTHOOK: Lineage: iow1_mm PARTITION(key2=2).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +POSTHOOK: Lineage: iow1_mm PARTITION(key2=3).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +POSTHOOK: Lineage: iow1_mm PARTITION(key2=99).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +PREHOOK: query: select * from iow1_mm order by key, key2
 +PREHOOK: type: QUERY
 +PREHOOK: Input: default@iow1_mm
 +PREHOOK: Input: default@iow1_mm@key2=0
 +PREHOOK: Input: default@iow1_mm@key2=10
 +PREHOOK: Input: default@iow1_mm@key2=100
 +PREHOOK: Input: default@iow1_mm@key2=101
 +PREHOOK: Input: default@iow1_mm@key2=102
 +PREHOOK: Input: default@iow1_mm@key2=103
 +PREHOOK: Input: default@iow1_mm@key2=105
 +PREHOOK: Input: default@iow1_mm@key2=106
 +PREHOOK: Input: default@iow1_mm@key2=12
 +PREHOOK: Input: default@iow1_mm@key2=13
 +PREHOOK: Input: default@iow1_mm@key2=2
 +PREHOOK: Input: default@iow1_mm@key2=3
 +PREHOOK: Input: default@iow1_mm@key2=97
 +PREHOOK: Input: default@iow1_mm@key2=98
 +PREHOOK: Input: default@iow1_mm@key2=99
 +#### A masked pattern was here ####
 +POSTHOOK: query: select * from iow1_mm order by key, key2
 +POSTHOOK: type: QUERY
 +POSTHOOK: Input: default@iow1_mm
 +POSTHOOK: Input: default@iow1_mm@key2=0
 +POSTHOOK: Input: default@iow1_mm@key2=10
 +POSTHOOK: Input: default@iow1_mm@key2=100
 +POSTHOOK: Input: default@iow1_mm@key2=101
 +POSTHOOK: Input: default@iow1_mm@key2=102
 +POSTHOOK: Input: default@iow1_mm@key2=103
 +POSTHOOK: Input: default@iow1_mm@key2=105
 +POSTHOOK: Input: default@iow1_mm@key2=106
 +POSTHOOK: Input: default@iow1_mm@key2=12
 +POSTHOOK: Input: default@iow1_mm@key2=13
 +POSTHOOK: Input: default@iow1_mm@key2=2
 +POSTHOOK: Input: default@iow1_mm@key2=3
 +POSTHOOK: Input: default@iow1_mm@key2=97
 +POSTHOOK: Input: default@iow1_mm@key2=98
 +POSTHOOK: Input: default@iow1_mm@key2=99
 +#### A masked pattern was here ####
 +2	2
 +3	0
 +3	3
 +4	0
 +12	12
 +13	10
 +13	13
 +14	10
 +99	99
 +100	97
 +100	100
 +100	100
 +101	97
 +101	98
 +101	101
 +102	98
 +102	102
 +103	103
 +105	105
 +106	106
 +PREHOOK: query: drop table iow1_mm
 +PREHOOK: type: DROPTABLE
 +PREHOOK: Input: default@iow1_mm
 +PREHOOK: Output: default@iow1_mm
 +POSTHOOK: query: drop table iow1_mm
 +POSTHOOK: type: DROPTABLE
 +POSTHOOK: Input: default@iow1_mm
 +POSTHOOK: Output: default@iow1_mm
 +PREHOOK: query: drop table load0_mm
 +PREHOOK: type: DROPTABLE
 +POSTHOOK: query: drop table load0_mm
 +POSTHOOK: type: DROPTABLE
 +PREHOOK: query: create table load0_mm (key string, value string) stored as textfile tblproperties("transactional"="true", "transactional_properties"="insert_only")
 +PREHOOK: type: CREATETABLE
 +PREHOOK: Output: database:default
 +PREHOOK: Output: default@load0_mm
 +POSTHOOK: query: create table load0_mm (key string, value string) stored as textfile tblproperties("transactional"="true", "transactional_properties"="insert_only")
 +POSTHOOK: type: CREATETABLE
 +POSTHOOK: Output: database:default
 +POSTHOOK: Output: default@load0_mm
 +PREHOOK: query: load data local inpath '../../data/files/kv1.txt' into table load0_mm
 +PREHOOK: type: LOAD
 +#### A masked pattern was here ####
 +PREHOOK: Output: default@load0_mm
 +POSTHOOK: query: load data local inpath '../../data/files/kv1.txt' into table load0_mm
 +POSTHOOK: type: LOAD
 +#### A masked pattern was here ####
 +POSTHOOK: Output: default@load0_mm
 +PREHOOK: query: select count(1) from load0_mm
 +PREHOOK: type: QUERY
 +PREHOOK: Input: default@load0_mm
 +#### A masked pattern was here ####
 +POSTHOOK: query: select count(1) from load0_mm
 +POSTHOOK: type: QUERY
 +POSTHOOK: Input: default@load0_mm
 +#### A masked pattern was here ####
 +500
 +PREHOOK: query: load data local inpath '../../data/files/kv2.txt' into table load0_mm
 +PREHOOK: type: LOAD
 +#### A masked pattern was here ####
 +PREHOOK: Output: default@load0_mm
 +POSTHOOK: query: load data local inpath '../../data/files/kv2.txt' into table load0_mm
 +POSTHOOK: type: LOAD
 +#### A masked pattern was here ####
 +POSTHOOK: Output: default@load0_mm
 +PREHOOK: query: select count(1) from load0_mm
 +PREHOOK: type: QUERY
 +PREHOOK: Input: default@load0_mm
 +#### A masked pattern was here ####
 +POSTHOOK: query: select count(1) from load0_mm
 +POSTHOOK: type: QUERY
 +POSTHOOK: Input: default@load0_mm
 +#### A masked pattern was here ####
 +1000
 +PREHOOK: query: load data local inpath '../../data/files/kv2.txt' overwrite into table load0_mm
 +PREHOOK: type: LOAD
 +#### A masked pattern was here ####
 +PREHOOK: Output: default@load0_mm
 +POSTHOOK: query: load data local inpath '../../data/files/kv2.txt' overwrite into table load0_mm
 +POSTHOOK: type: LOAD
 +#### A masked pattern was here ####
 +POSTHOOK: Output: default@load0_mm
 +PREHOOK: query: select count(1) from load0_mm
 +PREHOOK: type: QUERY
 +PREHOOK: Input: default@load0_mm
 +#### A masked pattern was here ####
 +POSTHOOK: query: select count(1) from load0_mm
 +POSTHOOK: type: QUERY
 +POSTHOOK: Input: default@load0_mm
 +#### A masked pattern was here ####
 +500
 +PREHOOK: query: drop table load0_mm
 +PREHOOK: type: DROPTABLE
 +PREHOOK: Input: default@load0_mm
 +PREHOOK: Output: default@load0_mm
 +POSTHOOK: query: drop table load0_mm
 +POSTHOOK: type: DROPTABLE
 +POSTHOOK: Input: default@load0_mm
 +POSTHOOK: Output: default@load0_mm
 +PREHOOK: query: drop table intermediate2
 +PREHOOK: type: DROPTABLE
 +POSTHOOK: query: drop table intermediate2
 +POSTHOOK: type: DROPTABLE
 +PREHOOK: query: create table intermediate2 (key string, value string) stored as textfile
 +#### A masked pattern was here ####
 +PREHOOK: type: CREATETABLE
 +#### A masked pattern was here ####
 +PREHOOK: Output: database:default
 +PREHOOK: Output: default@intermediate2
 +POSTHOOK: query: create table intermediate2 (key string, value string) stored as textfile
 +#### A masked pattern was here ####
 +POSTHOOK: type: CREATETABLE
 +#### A masked pattern was here ####
 +POSTHOOK: Output: database:default
 +POSTHOOK: Output: default@intermediate2
 +PREHOOK: query: load data local inpath '../../data/files/kv1.txt' into table intermediate2
 +PREHOOK: type: LOAD
 +#### A masked pattern was here ####
 +PREHOOK: Output: default@intermediate2
 +POSTHOOK: query: load data local inpath '../../data/files/kv1.txt' into table intermediate2
 +POSTHOOK: type: LOAD
 +#### A masked pattern was here ####
 +POSTHOOK: Output: default@intermediate2
 +PREHOOK: query: load data local inpath '../../data/files/kv2.txt' into table intermediate2
 +PREHOOK: type: LOAD
 +#### A masked pattern was here ####
 +PREHOOK: Output: default@intermediate2
 +POSTHOOK: query: load data local inpath '../../data/files/kv2.txt' into table intermediate2
 +POSTHOOK: type: LOAD
 +#### A masked pattern was here ####
 +POSTHOOK: Output: default@intermediate2
 +PREHOOK: query: load data local inpath '../../data/files/kv3.txt' into table intermediate2
 +PREHOOK: type: LOAD
 +#### A masked pattern was here ####
 +PREHOOK: Output: default@intermediate2
 +POSTHOOK: query: load data local inpath '../../data/files/kv3.txt' into table intermediate2
 +POSTHOOK: type: LOAD
 +#### A masked pattern was here ####
 +POSTHOOK: Output: default@intermediate2
 +PREHOOK: query: drop table load1_mm
 +PREHOOK: type: DROPTABLE
 +POSTHOOK: query: drop table load1_mm
 +POSTHOOK: type: DROPTABLE
 +PREHOOK: query: create table load1_mm (key string, value string) stored as textfile tblproperties("transactional"="true", "transactional_properties"="insert_only")
 +PREHOOK: type: CREATETABLE
 +PREHOOK: Output: database:default
 +PREHOOK: Output: default@load1_mm
 +POSTHOOK: query: create table load1_mm (key string, value string) stored as textfile tblproperties("transactional"="true", "transactional_properties"="insert_only")
 +POSTHOOK: type: CREATETABLE
 +POSTHOOK: Output: database:default
 +POSTHOOK: Output: default@load1_mm
 +#### A masked pattern was here ####
 +PREHOOK: type: LOAD
 +#### A masked pattern was here ####
 +PREHOOK: Output: default@load1_mm
 +#### A masked pattern was here ####
 +POSTHOOK: type: LOAD
 +#### A masked pattern was here ####
 +POSTHOOK: Output: default@load1_mm
 +#### A masked pattern was here ####
 +PREHOOK: type: LOAD
 +#### A masked pattern was here ####
 +PREHOOK: Output: default@load1_mm
 +#### A masked pattern was here ####
 +POSTHOOK: type: LOAD
 +#### A masked pattern was here ####
 +POSTHOOK: Output: default@load1_mm
 +PREHOOK: query: select count(1) from load1_mm
 +PREHOOK: type: QUERY
 +PREHOOK: Input: default@load1_mm
 +#### A masked pattern was here ####
 +POSTHOOK: query: select count(1) from load1_mm
 +POSTHOOK: type: QUERY
 +POSTHOOK: Input: default@load1_mm
 +#### A masked pattern was here ####
 +1000
 +PREHOOK: query: load data local inpath '../../data/files/kv1.txt' into table intermediate2
 +PREHOOK: type: LOAD
 +#### A masked pattern was here ####
 +PREHOOK: Output: default@intermediate2
 +POSTHOOK: query: load data local inpath '../../data/files/kv1.txt' into table intermediate2
 +POSTHOOK: type: LOAD
 +#### A masked pattern was here ####
 +POSTHOOK: Output: default@intermediate2
 +PREHOOK: query: load data local inpath '../../data/files/kv2.txt' into table intermediate2
 +PREHOOK: type: LOAD
 +#### A masked pattern was here ####
 +PREHOOK: Output: default@intermediate2
 +POSTHOOK: query: load data local inpath '../../data/files/kv2.txt' into table intermediate2
 +POSTHOOK: type: LOAD
 +#### A masked pattern was here ####
 +POSTHOOK: Output: default@intermediate2
 +PREHOOK: query: load data local inpath '../../data/files/kv3.txt' into table intermediate2
 +PREHOOK: type: LOAD
 +#### A masked pattern was here ####
 +PREHOOK: Output: default@intermediate2
 +POSTHOOK: query: load data local inpath '../../data/files/kv3.txt' into table intermediate2
 +POSTHOOK: type: LOAD
 +#### A masked pattern was here ####
 +POSTHOOK: Output: default@intermediate2
 +#### A masked pattern was here ####
 +PREHOOK: type: LOAD
 +#### A masked pattern was here ####
 +PREHOOK: Output: default@load1_mm
 +#### A masked pattern was here ####
 +POSTHOOK: type: LOAD
 +#### A masked pattern was here ####
 +POSTHOOK: Output: default@load1_mm
 +PREHOOK: query: select count(1) from load1_mm
 +PREHOOK: type: QUERY
 +PREHOOK: Input: default@load1_mm
 +#### A masked pattern was here ####
 +POSTHOOK: query: select count(1) from load1_mm
 +POSTHOOK: type: QUERY
 +POSTHOOK: Input: default@load1_mm
 +#### A masked pattern was here ####
 +1050
 +PREHOOK: query: load data local inpath '../../data/files/kv2.txt' into table intermediate2
 +PREHOOK: type: LOAD
 +#### A masked pattern was here ####
 +PREHOOK: Output: default@intermediate2
 +POSTHOOK: query: load data local inpath '../../data/files/kv2.txt' into table intermediate2
 +POSTHOOK: type: LOAD
 +#### A masked pattern was here ####
 +POSTHOOK: Output: default@intermediate2
 +#### A masked pattern was here ####
 +PREHOOK: type: LOAD
 +#### A masked pattern was here ####
 +PREHOOK: Output: default@load1_mm
 +#### A masked pattern was here ####
 +POSTHOOK: type: LOAD
 +#### A masked pattern was here ####
 +POSTHOOK: Output: default@load1_mm
 +PREHOOK: query: select count(1) from load1_mm
 +PREHOOK: type: QUERY
 +PREHOOK: Input: default@load1_mm
 +#### A masked pattern was here ####
 +POSTHOOK: query: select count(1) from load1_mm
 +POSTHOOK: type: QUERY
 +POSTHOOK: Input: default@load1_mm
 +#### A masked pattern was here ####
 +500
 +PREHOOK: query: drop table load1_mm
 +PREHOOK: type: DROPTABLE
 +PREHOOK: Input: default@load1_mm
 +PREHOOK: Output: default@load1_mm
 +POSTHOOK: query: drop table load1_mm
 +POSTHOOK: type: DROPTABLE
 +POSTHOOK: Input: default@load1_mm
 +POSTHOOK: Output: default@load1_mm
 +PREHOOK: query: drop table load2_mm
 +PREHOOK: type: DROPTABLE
 +POSTHOOK: query: drop table load2_mm
 +POSTHOOK: type: DROPTABLE
 +PREHOOK: query: create table load2_mm (key string, value string)
 +  partitioned by (k int, l int) stored as textfile tblproperties("transactional"="true", "transactional_properties"="insert_only")
 +PREHOOK: type: CREATETABLE
 +PREHOOK: Output: database:default
 +PREHOOK: Output: default@load2_mm
 +POSTHOOK: query: create table load2_mm (key string, value string)
 +  partitioned by (k int, l int) stored as textfile tblproperties("transactional"="true", "transactional_properties"="insert_only")
 +POSTHOOK: type: CREATETABLE
 +POSTHOOK: Output: database:default
 +POSTHOOK: Output: default@load2_mm
 +PREHOOK: query: load data local inpath '../../data/files/kv1.txt' into table intermediate2
 +PREHOOK: type: LOAD
 +#### A masked pattern was here ####
 +PREHOOK: Output: default@intermediate2
 +POSTHOOK: query: load data local inpath '../../data/files/kv1.txt' into table intermediate2
 +POSTHOOK: type: LOAD
 +#### A masked pattern was here ####
 +POSTHOOK: Output: default@intermediate2
 +PREHOOK: query: load data local inpath '../../data/files/kv2.txt' into table intermediate2
 +PREHOOK: type: LOAD
 +#### A masked pattern was here ####
 +PREHOOK: Output: default@intermediate2
 +POSTHOOK: query: load data local inpath '../../data/files/kv2.txt' into table intermediate2
 +POSTHOOK: type: LOAD
 +#### A masked pattern was here ####
 +POSTHOOK: Output: default@intermediate2
 +PREHOOK: query: load data local inpath '../../data/files/kv3.txt' into table intermediate2
 +PREHOOK: type: LOAD
 +#### A masked pattern was here ####
 +PREHOOK: Output: default@intermediate2
 +POSTHOOK: query: load data local inpath '../../data/files/kv3.txt' into table intermediate2
 +POSTHOOK: type: LOAD
 +#### A masked pattern was here ####
 +POSTHOOK: Output: default@intermediate2
 +#### A masked pattern was here ####
 +PREHOOK: type: LOAD
 +#### A masked pattern was here ####
 +PREHOOK: Output: default@load2_mm
 +#### A masked pattern was here ####
 +POSTHOOK: type: LOAD
 +#### A masked pattern was here ####
 +POSTHOOK: Output: default@load2_mm
 +POSTHOOK: Output: default@load2_mm@k=5/l=5
 +PREHOOK: query: select count(1) from load2_mm
 +PREHOOK: type: QUERY
 +PREHOOK: Input: default@load2_mm
 +PREHOOK: Input: default@load2_mm@k=5/l=5
 +#### A masked pattern was here ####
 +POSTHOOK: query: select count(1) from load2_mm
 +POSTHOOK: type: QUERY
 +POSTHOOK: Input: default@load2_mm
 +POSTHOOK: Input: default@load2_mm@k=5/l=5
 +#### A masked pattern was here ####
 +1025
 +PREHOOK: query: drop table load2_mm
 +PREHOOK: type: DROPTABLE
 +PREHOOK: Input: default@load2_mm
 +PREHOOK: Output: default@load2_mm
 +POSTHOOK: query: drop table load2_mm
 +POSTHOOK: type: DROPTABLE
 +POSTHOOK: Input: default@load2_mm
 +POSTHOOK: Output: default@load2_mm
 +PREHOOK: query: drop table intermediate2
 +PREHOOK: type: DROPTABLE
 +PREHOOK: Input: default@intermediate2
 +PREHOOK: Output: default@intermediate2
 +POSTHOOK: query: drop table intermediate2
 +POSTHOOK: type: DROPTABLE
 +POSTHOOK: Input: default@intermediate2
 +POSTHOOK: Output: default@intermediate2
 +PREHOOK: query: drop table intermediate_nonpart
 +PREHOOK: type: DROPTABLE
 +POSTHOOK: query: drop table intermediate_nonpart
 +POSTHOOK: type: DROPTABLE
 +PREHOOK: query: drop table intermmediate_part
 +PREHOOK: type: DROPTABLE
 +POSTHOOK: query: drop table intermmediate_part
 +POSTHOOK: type: DROPTABLE
 +PREHOOK: query: drop table intermmediate_nonpart
 +PREHOOK: type: DROPTABLE
 +POSTHOOK: query: drop table intermmediate_nonpart
 +POSTHOOK: type: DROPTABLE
 +PREHOOK: query: create table intermediate_nonpart(key int, p int)
 +PREHOOK: type: CREATETABLE
 +PREHOOK: Output: database:default
 +PREHOOK: Output: default@intermediate_nonpart
 +POSTHOOK: query: create table intermediate_nonpart(key int, p int)
 +POSTHOOK: type: CREATETABLE
 +POSTHOOK: Output: database:default
 +POSTHOOK: Output: default@intermediate_nonpart
 +PREHOOK: query: insert into intermediate_nonpart select * from intermediate
 +PREHOOK: type: QUERY
 +PREHOOK: Input: default@intermediate
 +PREHOOK: Input: default@intermediate@p=455
 +PREHOOK: Input: default@intermediate@p=456
 +PREHOOK: Input: default@intermediate@p=457
 +PREHOOK: Output: default@intermediate_nonpart
 +POSTHOOK: query: insert into intermediate_nonpart select * from intermediate
 +POSTHOOK: type: QUERY
 +POSTHOOK: Input: default@intermediate
 +POSTHOOK: Input: default@intermediate@p=455
 +POSTHOOK: Input: default@intermediate@p=456
 +POSTHOOK: Input: default@intermediate@p=457
 +POSTHOOK: Output: default@intermediate_nonpart
 +POSTHOOK: Lineage: intermediate_nonpart.key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ]
 +POSTHOOK: Lineage: intermediate_nonpart.p SIMPLE [(intermediate)intermediate.FieldSchema(name:p, type:int, comment:null), ]
 +PREHOOK: query: create table intermmediate_nonpart(key int, p int) tblproperties("transactional"="true", "transactional_properties"="insert_only")
 +PREHOOK: type: CREATETABLE
 +PREHOOK: Output: database:default
 +PREHOOK: Output: default@intermmediate_nonpart
 +POSTHOOK: query: create table intermmediate_nonpart(key int, p int) tblproperties("transactional"="true", "transactional_properties"="insert_only")
 +POSTHOOK: type: CREATETABLE
 +POSTHOOK: Output: database:default
 +POSTHOOK: Output: default@intermmediate_nonpart
 +PREHOOK: query: insert into intermmediate_nonpart select * from intermediate
 +PREHOOK: type: QUERY
 +PREHOOK: Input: default@intermediate
 +PREHOOK: Input: default@intermediate@p=455
 +PREHOOK: Input: default@intermediate@p=456
 +PREHOOK: Input: default@intermediate@p=457
 +PREHOOK: Output: default@intermmediate_nonpart
 +POSTHOOK: query: insert into intermmediate_nonpart select * from intermediate
 +POSTHOOK: type: QUERY
 +POSTHOOK: Input: default@intermediate
 +POSTHOOK: Input: default@intermediate@p=455
 +POSTHOOK: Input: default@intermediate@p=

<TRUNCATED>

[41/50] [abbrv] hive git commit: HIVE-14671 : merge master into hive-14535 (Wei Zheng)

Posted by we...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/1ceaf357/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java
----------------------------------------------------------------------
diff --cc ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java
index 3ad1733,4d727ba..8febcc0
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java
@@@ -38,12 -23,10 +38,13 @@@ import org.apache.hadoop.fs.FSDataOutpu
  import org.apache.hadoop.fs.FileStatus;
  import org.apache.hadoop.fs.FileSystem;
  import org.apache.hadoop.fs.Path;
 +import org.apache.hadoop.fs.PathFilter;
  import org.apache.hadoop.hive.common.FileUtils;
 +import org.apache.hadoop.hive.common.HiveStatsUtils;
  import org.apache.hadoop.hive.common.StatsSetupConst;
 +import org.apache.hadoop.hive.common.ValidWriteIds;
  import org.apache.hadoop.hive.conf.HiveConf;
+ import org.apache.hadoop.hive.conf.HiveConfUtil;
  import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
  import org.apache.hadoop.hive.ql.CompilationOpContext;
  import org.apache.hadoop.hive.ql.ErrorMsg;
@@@ -172,24 -149,13 +172,24 @@@ public class FileSinkOperator extends T
      Path[] finalPaths;
      RecordWriter[] outWriters;
      RecordUpdater[] updaters;
 -    Stat stat;
 +    private Stat stat;
      int acidLastBucket = -1;
      int acidFileOffset = -1;
 +    private boolean isMmTable;
 +
 +    public FSPaths(Path specPath, boolean isMmTable) {
 +      this.isMmTable = isMmTable;
 +      if (!isMmTable) {
 +        tmpPath = Utilities.toTempPath(specPath);
 +        taskOutputTempPath = Utilities.toTaskTempPath(specPath);
 +      } else {
 +        tmpPath = specPath;
 +        taskOutputTempPath = null; // Should not be used.
-       } 
++      }
 +      Utilities.LOG14535.info("new FSPaths for " + numFiles + " files, dynParts = " + bDynParts
 +          + ": tmpPath " + tmpPath + ", task path " + taskOutputTempPath
 +          + " (spec path " + specPath + ")"/*, new Exception()*/);
  
 -    public FSPaths(Path specPath) {
 -      tmpPath = Utilities.toTempPath(specPath);
 -      taskOutputTempPath = Utilities.toTaskTempPath(specPath);
        outPaths = new Path[numFiles];
        finalPaths = new Path[numFiles];
        outWriters = new RecordWriter[numFiles];
@@@ -240,38 -206,30 +240,38 @@@
        }
      }
  
 -    private void commit(FileSystem fs) throws HiveException {
 +    private void commit(FileSystem fs, List<Path> commitPaths) throws HiveException {
        for (int idx = 0; idx < outPaths.length; ++idx) {
          try {
 -          if ((bDynParts || isSkewedStoredAsSubDirectories)
 -              && !fs.exists(finalPaths[idx].getParent())) {
 -            fs.mkdirs(finalPaths[idx].getParent());
 -          }
 -          boolean needToRename = true;
 -          if (conf.getWriteType() == AcidUtils.Operation.UPDATE ||
 -              conf.getWriteType() == AcidUtils.Operation.DELETE) {
 -            // If we're updating or deleting there may be no file to close.  This can happen
 -            // because the where clause strained out all of the records for a given bucket.  So
 -            // before attempting the rename below, check if our file exists.  If it doesn't,
 -            // then skip the rename.  If it does try it.  We could just blindly try the rename
 -            // and avoid the extra stat, but that would mask other errors.
 -            try {
 -              if (outPaths[idx] != null) {
 -                FileStatus stat = fs.getFileStatus(outPaths[idx]);
 -              }
 -            } catch (FileNotFoundException fnfe) {
 -              needToRename = false;
 -            }
 -          }
 -          if (needToRename && outPaths[idx] != null && !fs.rename(outPaths[idx], finalPaths[idx])) {
 +          commitOneOutPath(idx, fs, commitPaths);
 +        } catch (IOException e) {
 +          throw new HiveException("Unable to commit output from: " +
 +              outPaths[idx] + " to: " + finalPaths[idx], e);
 +        }
 +      }
 +    }
 +
 +    private void commitOneOutPath(int idx, FileSystem fs, List<Path> commitPaths)
 +        throws IOException, HiveException {
 +      if ((bDynParts || isSkewedStoredAsSubDirectories)
 +          && !fs.exists(finalPaths[idx].getParent())) {
 +        Utilities.LOG14535.info("commit making path for dyn/skew: " + finalPaths[idx].getParent());
-         FileUtils.mkdir(fs, finalPaths[idx].getParent(), inheritPerms, hconf);
++        FileUtils.mkdir(fs, finalPaths[idx].getParent(), hconf);
 +      }
 +      // If we're updating or deleting there may be no file to close.  This can happen
 +      // because the where clause strained out all of the records for a given bucket.  So
 +      // before attempting the rename below, check if our file exists.  If it doesn't,
 +      // then skip the rename.  If it does try it.  We could just blindly try the rename
 +      // and avoid the extra stat, but that would mask other errors.
 +      Operation acidOp = conf.getWriteType();
 +      boolean needToRename = outPaths[idx] != null && ((acidOp != Operation.UPDATE
 +          && acidOp != Operation.DELETE) || fs.exists(outPaths[idx]));
 +      if (needToRename && outPaths[idx] != null) {
 +        Utilities.LOG14535.info("committing " + outPaths[idx] + " to " + finalPaths[idx] + " (" + isMmTable + ")");
 +        if (isMmTable) {
 +          assert outPaths[idx].equals(finalPaths[idx]);
 +          commitPaths.add(outPaths[idx]);
 +        } else if (!fs.rename(outPaths[idx], finalPaths[idx])) {
              FileStatus fileStatus = FileUtils.getFileStatusOrNull(fs, finalPaths[idx]);
              if (fileStatus != null) {
                LOG.warn("Target path " + finalPaths[idx] + " with a size " + fileStatus.getLen() + " exists. Trying to delete it.");
@@@ -735,15 -613,9 +731,15 @@@
        Utilities.copyTableJobPropertiesToConf(conf.getTableInfo(), jc);
        // only create bucket files only if no dynamic partitions,
        // buckets of dynamic partitions will be created for each newly created partition
 -      if (conf.getWriteType() == AcidUtils.Operation.NOT_ACID) {
 +      if (conf.getWriteType() == AcidUtils.Operation.NOT_ACID ||
 +          conf.getWriteType() == AcidUtils.Operation.INSERT_ONLY) {
 +        Path outPath = fsp.outPaths[filesIdx];
 +        if ((conf.getWriteType() == AcidUtils.Operation.INSERT_ONLY || conf.isMmTable())
-             && inheritPerms && !FileUtils.mkdir(fs, outPath.getParent(), inheritPerms, hconf)) {
++            && !FileUtils.mkdir(fs, outPath.getParent(), hconf)) {
 +          LOG.warn("Unable to create directory with inheritPerms: " + outPath);
 +        }
          fsp.outWriters[filesIdx] = HiveFileFormatUtils.getHiveRecordWriter(jc, conf.getTableInfo(),
 -            outputClass, conf, fsp.outPaths[filesIdx], reporter);
 +            outputClass, conf, outPath, reporter);
          // If the record writer provides stats, get it from there instead of the serde
          statsFromRecordWriter[filesIdx] = fsp.outWriters[filesIdx] instanceof
              StatsProvidingRecordWriter;

http://git-wip-us.apache.org/repos/asf/hive/blob/1ceaf357/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java
----------------------------------------------------------------------
diff --cc ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java
index 29b72a0,f329b51..acf7404
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java
@@@ -410,234 -547,6 +407,234 @@@ public class MoveTask extends Task<Move
        return (1);
      }
    }
 +
 +  private DataContainer handleStaticParts(Hive db, Table table, LoadTableDesc tbd,
 +      TaskInformation ti) throws HiveException, IOException, InvalidOperationException {
 +    List<String> partVals = MetaStoreUtils.getPvals(table.getPartCols(),  tbd.getPartitionSpec());
 +    db.validatePartitionNameCharacters(partVals);
 +    Utilities.LOG14535.info("loadPartition called from " + tbd.getSourcePath()
 +        + " into " + tbd.getTable().getTableName());
 +    boolean isCommitMmWrite = tbd.isCommitMmWrite();
-     db.loadSinglePartition(tbd.getSourcePath(), tbd.getTable().getTableName(),
++    db.loadPartition(tbd.getSourcePath(), tbd.getTable().getTableName(),
 +        tbd.getPartitionSpec(), tbd.getReplace(),
 +        tbd.getInheritTableSpecs(), isSkewedStoredAsDirs(tbd), work.isSrcLocal(),
 +        (work.getLoadTableWork().getWriteType() != AcidUtils.Operation.NOT_ACID &&
 +         work.getLoadTableWork().getWriteType() != AcidUtils.Operation.INSERT_ONLY),
 +        hasFollowingStatsTask(), tbd.getMmWriteId(), isCommitMmWrite);
 +    Partition partn = db.getPartition(table, tbd.getPartitionSpec(), false);
 +
 +    // See the comment inside updatePartitionBucketSortColumns.
 +    if (!tbd.isMmTable() && (ti.bucketCols != null || ti.sortCols != null)) {
 +      updatePartitionBucketSortColumns(db, table, partn, ti.bucketCols,
 +          ti.numBuckets, ti.sortCols);
 +    }
 +
 +    DataContainer dc = new DataContainer(table.getTTable(), partn.getTPartition());
 +    // add this partition to post-execution hook
 +    if (work.getOutputs() != null) {
 +      DDLTask.addIfAbsentByName(new WriteEntity(partn,
 +        getWriteType(tbd, work.getLoadTableWork().getWriteType())), work.getOutputs());
 +    }
 +    return dc;
 +  }
 +
 +  private DataContainer handleDynParts(Hive db, Table table, LoadTableDesc tbd,
 +      TaskInformation ti, DynamicPartitionCtx dpCtx) throws HiveException,
 +      IOException, InvalidOperationException {
 +    DataContainer dc;
 +    List<LinkedHashMap<String, String>> dps = Utilities.getFullDPSpecs(conf, dpCtx);
 +
 +    console.printInfo(System.getProperty("line.separator"));
 +    long startTime = System.currentTimeMillis();
 +    // load the list of DP partitions and return the list of partition specs
 +    // TODO: In a follow-up to HIVE-1361, we should refactor loadDynamicPartitions
 +    // to use Utilities.getFullDPSpecs() to get the list of full partSpecs.
 +    // After that check the number of DPs created to not exceed the limit and
 +    // iterate over it and call loadPartition() here.
 +    // The reason we don't do inside HIVE-1361 is the latter is large and we
 +    // want to isolate any potential issue it may introduce.
 +    if (tbd.isMmTable() && !tbd.isCommitMmWrite()) {
 +      throw new HiveException("Only single-partition LoadTableDesc can skip committing write ID");
 +    }
 +    Map<Map<String, String>, Partition> dp =
 +      db.loadDynamicPartitions(
 +        tbd.getSourcePath(),
 +        tbd.getTable().getTableName(),
 +        tbd.getPartitionSpec(),
 +        tbd.getReplace(),
 +        dpCtx.getNumDPCols(),
 +        (tbd.getLbCtx() == null) ? 0 : tbd.getLbCtx().calculateListBucketingLevel(),
 +        work.getLoadTableWork().getWriteType() != AcidUtils.Operation.NOT_ACID &&
 +            work.getLoadTableWork().getWriteType() != AcidUtils.Operation.INSERT_ONLY,
 +        SessionState.get().getTxnMgr().getCurrentTxnId(), hasFollowingStatsTask(),
 +        work.getLoadTableWork().getWriteType(),
 +        tbd.getMmWriteId());
 +
 +    // publish DP columns to its subscribers
 +    if (dps != null && dps.size() > 0) {
 +      pushFeed(FeedType.DYNAMIC_PARTITIONS, dp.values());
 +    }
 +
 +    String loadTime = "\t Time taken to load dynamic partitions: "  +
 +        (System.currentTimeMillis() - startTime)/1000.0 + " seconds";
 +    console.printInfo(loadTime);
 +    LOG.info(loadTime);
 +
 +    if (dp.size() == 0 && conf.getBoolVar(HiveConf.ConfVars.HIVE_ERROR_ON_EMPTY_PARTITION)) {
 +      throw new HiveException("This query creates no partitions." +
 +          " To turn off this error, set hive.error.on.empty.partition=false.");
 +    }
 +
 +    startTime = System.currentTimeMillis();
 +    // for each partition spec, get the partition
 +    // and put it to WriteEntity for post-exec hook
 +    for(Map.Entry<Map<String, String>, Partition> entry : dp.entrySet()) {
 +      Partition partn = entry.getValue();
 +
 +      // See the comment inside updatePartitionBucketSortColumns.
 +      if (!tbd.isMmTable() && (ti.bucketCols != null || ti.sortCols != null)) {
 +        updatePartitionBucketSortColumns(
 +            db, table, partn, ti.bucketCols, ti.numBuckets, ti.sortCols);
 +      }
 +
 +      WriteEntity enty = new WriteEntity(partn,
 +        getWriteType(tbd, work.getLoadTableWork().getWriteType()));
 +      if (work.getOutputs() != null) {
 +        DDLTask.addIfAbsentByName(enty, work.getOutputs());
 +      }
 +      // Need to update the queryPlan's output as well so that post-exec hooks get executed.
 +      // This is only needed for dynamic partitioning since for SP the WriteEntity is
 +      // constructed at compile time and the queryPlan already contains that.
 +      // For DP, WriteEntity creation is deferred at this stage so we need to update
 +      // queryPlan here.
 +      if (queryPlan.getOutputs() == null) {
 +        queryPlan.setOutputs(new LinkedHashSet<WriteEntity>());
 +      }
 +      queryPlan.getOutputs().add(enty);
 +
 +      // update columnar lineage for each partition
 +      dc = new DataContainer(table.getTTable(), partn.getTPartition());
 +
 +      // Don't set lineage on delete as we don't have all the columns
 +      if (SessionState.get() != null &&
 +          work.getLoadTableWork().getWriteType() != AcidUtils.Operation.DELETE &&
 +          work.getLoadTableWork().getWriteType() != AcidUtils.Operation.UPDATE) {
 +        SessionState.get().getLineageState().setLineage(tbd.getSourcePath(), dc,
 +            table.getCols());
 +      }
 +      LOG.info("\tLoading partition " + entry.getKey());
 +    }
 +    console.printInfo("\t Time taken for adding to write entity : " +
 +        (System.currentTimeMillis() - startTime)/1000.0 + " seconds");
 +    dc = null; // reset data container to prevent it being added again.
 +    return dc;
 +  }
 +
 +  private void inferTaskInformation(TaskInformation ti) {
 +    // Find the first ancestor of this MoveTask which is some form of map reduce task
 +    // (Either standard, local, or a merge)
 +    while (ti.task.getParentTasks() != null && ti.task.getParentTasks().size() == 1) {
 +      ti.task = (Task)ti.task.getParentTasks().get(0);
 +      // If it was a merge task or a local map reduce task, nothing can be inferred
 +      if (ti.task instanceof MergeFileTask || ti.task instanceof MapredLocalTask) {
 +        break;
 +      }
 +
 +      // If it's a standard map reduce task, check what, if anything, it inferred about
 +      // the directory this move task is moving
 +      if (ti.task instanceof MapRedTask) {
 +        MapredWork work = (MapredWork)ti.task.getWork();
 +        MapWork mapWork = work.getMapWork();
 +        ti.bucketCols = mapWork.getBucketedColsByDirectory().get(ti.path);
 +        ti.sortCols = mapWork.getSortedColsByDirectory().get(ti.path);
 +        if (work.getReduceWork() != null) {
 +          ti.numBuckets = work.getReduceWork().getNumReduceTasks();
 +        }
 +
 +        if (ti.bucketCols != null || ti.sortCols != null) {
 +          // This must be a final map reduce task (the task containing the file sink
 +          // operator that writes the final output)
 +          assert work.isFinalMapRed();
 +        }
 +        break;
 +      }
 +
 +      // If it's a move task, get the path the files were moved from, this is what any
 +      // preceding map reduce task inferred information about, and moving does not invalidate
 +      // those assumptions
 +      // This can happen when a conditional merge is added before the final MoveTask, but the
 +      // condition for merging is not met, see GenMRFileSink1.
 +      if (ti.task instanceof MoveTask) {
 +        MoveTask mt = (MoveTask)ti.task;
 +        if (mt.getWork().getLoadFileWork() != null) {
 +          ti.path = mt.getWork().getLoadFileWork().getSourcePath().toUri().toString();
 +        }
 +      }
 +    }
 +  }
 +
 +  private void checkFileFormats(Hive db, LoadTableDesc tbd, Table table)
 +      throws HiveException {
 +    if (work.getCheckFileFormat()) {
 +      // Get all files from the src directory
 +      FileStatus[] dirs;
 +      ArrayList<FileStatus> files;
 +      FileSystem srcFs; // source filesystem
 +      try {
 +        srcFs = tbd.getSourcePath().getFileSystem(conf);
 +        dirs = srcFs.globStatus(tbd.getSourcePath());
 +        files = new ArrayList<FileStatus>();
 +        for (int i = 0; (dirs != null && i < dirs.length); i++) {
 +          files.addAll(Arrays.asList(srcFs.listStatus(dirs[i].getPath(), FileUtils.HIDDEN_FILES_PATH_FILTER)));
 +          // We only check one file, so exit the loop when we have at least
 +          // one.
 +          if (files.size() > 0) {
 +            break;
 +          }
 +        }
 +      } catch (IOException e) {
 +        throw new HiveException(
 +            "addFiles: filesystem error in check phase", e);
 +      }
 +
 +      // handle file format check for table level
 +      if (HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVECHECKFILEFORMAT)) {
 +        boolean flag = true;
 +        // work.checkFileFormat is set to true only for the Load Task, so the assumption here is
 +        // that the dynamic partition context is null
 +        if (tbd.getDPCtx() == null) {
 +          if (tbd.getPartitionSpec() == null || tbd.getPartitionSpec().isEmpty()) {
 +            // Check if the file format of the file matches that of the table.
 +            flag = HiveFileFormatUtils.checkInputFormat(
 +                srcFs, conf, tbd.getTable().getInputFileFormatClass(), files);
 +          } else {
 +            // Check if the file format of the file matches that of the partition
 +            Partition oldPart = db.getPartition(table, tbd.getPartitionSpec(), false);
 +            if (oldPart == null) {
 +              // this means we have just created a table and are specifying partition in the
 +              // load statement (without pre-creating the partition), in which case lets use
 +              // table input format class. inheritTableSpecs defaults to true so when a new
 +              // partition is created later it will automatically inherit input format
 +              // from table object
 +              flag = HiveFileFormatUtils.checkInputFormat(
 +                  srcFs, conf, tbd.getTable().getInputFileFormatClass(), files);
 +            } else {
 +              flag = HiveFileFormatUtils.checkInputFormat(
 +                  srcFs, conf, oldPart.getInputFormatClass(), files);
 +            }
 +          }
 +          if (!flag) {
 +            throw new HiveException(
 +                "Wrong file format. Please check the file's format.");
 +          }
 +        } else {
 +          LOG.warn("Skipping file format check as dpCtx is not null");
 +        }
 +      }
 +    }
 +  }
 +
 +
    /**
     * so to make sure we create WriteEntity with the right WriteType.  This is (at this point) only
     * for consistency since LockManager (which is the only thing that pays attention to WriteType)
@@@ -771,4 -674,4 +768,4 @@@
    public String getName() {
      return "MOVE";
    }
--}
++}

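The checkFileFormats() method above validates the load source by sampling a single visible file from the globbed source path before comparing input formats. Below is a minimal sketch of that sampling step using only the Hadoop FileSystem API; the class name and the hidden-file filter are illustrative stand-ins, not part of this patch:

    import java.io.IOException;
    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.List;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.PathFilter;

    public class LoadSourceSampler {
      // Illustrative equivalent of FileUtils.HIDDEN_FILES_PATH_FILTER: skip "." and "_" prefixed names.
      private static final PathFilter VISIBLE_FILES = new PathFilter() {
        @Override
        public boolean accept(Path p) {
          String name = p.getName();
          return !name.startsWith(".") && !name.startsWith("_");
        }
      };

      /** Returns at most one visible file under srcPath; one file is enough for the format check. */
      public static List<FileStatus> sampleOneFile(Configuration conf, Path srcPath) throws IOException {
        FileSystem srcFs = srcPath.getFileSystem(conf);
        List<FileStatus> files = new ArrayList<FileStatus>();
        FileStatus[] dirs = srcFs.globStatus(srcPath);
        for (int i = 0; dirs != null && i < dirs.length; i++) {
          files.addAll(Arrays.asList(srcFs.listStatus(dirs[i].getPath(), VISIBLE_FILES)));
          if (!files.isEmpty()) {
            break;
          }
        }
        return files;
      }
    }
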
http://git-wip-us.apache.org/repos/asf/hive/blob/1ceaf357/ql/src/java/org/apache/hadoop/hive/ql/exec/ReplCopyTask.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/1ceaf357/ql/src/java/org/apache/hadoop/hive/ql/exec/StatsNoJobTask.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/1ceaf357/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java
----------------------------------------------------------------------
diff --cc ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java
index 5b5ddc3,9036d9e..777c119
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java
@@@ -23,59 -23,8 +23,60 @@@ import com.google.common.annotations.Vi
  import com.google.common.base.Preconditions;
  import com.google.common.collect.Lists;
  import com.google.common.collect.Sets;
 +import java.beans.DefaultPersistenceDelegate;
 +import java.beans.Encoder;
 +import java.beans.Expression;
 +import java.beans.Statement;
 +import java.io.ByteArrayInputStream;
 +import java.io.ByteArrayOutputStream;
 +import java.io.DataInput;
 +import java.io.EOFException;
 +import java.io.File;
 +import java.io.FileNotFoundException;
 +import java.io.IOException;
 +import java.io.InputStream;
 +import java.io.OutputStream;
 +import java.io.Serializable;
 +import java.net.URI;
 +import java.net.URL;
 +import java.net.URLClassLoader;
 +import java.net.URLDecoder;
 +import java.sql.Connection;
 +import java.sql.DriverManager;
 +import java.sql.PreparedStatement;
 +import java.sql.SQLException;
 +import java.sql.SQLFeatureNotSupportedException;
 +import java.sql.SQLTransientException;
 +import java.text.SimpleDateFormat;
 +import java.util.ArrayList;
 +import java.util.Arrays;
 +import java.util.Calendar;
 +import java.util.Collection;
 +import java.util.Enumeration;
 +import java.util.HashMap;
 +import java.util.HashSet;
 +import java.util.Iterator;
 +import java.util.LinkedHashMap;
 +import java.util.LinkedList;
 +import java.util.List;
 +import java.util.Map;
 +import java.util.Properties;
 +import java.util.Random;
 +import java.util.Set;
 +import java.util.UUID;
 +import java.util.concurrent.ConcurrentHashMap;
 +import java.util.concurrent.ExecutionException;
 +import java.util.concurrent.Future;
 +import java.util.concurrent.LinkedBlockingQueue;
 +import java.util.concurrent.ThreadPoolExecutor;
 +import java.util.concurrent.TimeUnit;
 +import java.util.regex.Matcher;
 +import java.util.regex.Pattern;
 +import java.util.zip.Deflater;
 +import java.util.zip.DeflaterOutputStream;
 +import java.util.zip.InflaterInputStream;
  import com.google.common.util.concurrent.ThreadFactoryBuilder;
+ 
  import org.apache.commons.codec.binary.Base64;
  import org.apache.commons.lang.StringUtils;
  import org.apache.commons.lang.WordUtils;
@@@ -201,9 -145,7 +204,8 @@@ import org.apache.hadoop.mapred.RecordR
  import org.apache.hadoop.mapred.Reporter;
  import org.apache.hadoop.mapred.SequenceFileInputFormat;
  import org.apache.hadoop.mapred.SequenceFileOutputFormat;
 +import org.apache.hadoop.mapred.TextInputFormat;
  import org.apache.hadoop.util.Progressable;
- import org.apache.hadoop.util.Shell;
  import org.apache.hive.common.util.ACLConfigurationParser;
  import org.apache.hive.common.util.ReflectionUtil;
  import org.slf4j.Logger;
@@@ -3162,28 -3021,20 +3164,32 @@@ public final class Utilities 
  
      Set<Path> pathsProcessed = new HashSet<Path>();
      List<Path> pathsToAdd = new LinkedList<Path>();
+     LockedDriverState lDrvStat = LockedDriverState.getLockedDriverState();
      // AliasToWork contains all the aliases
 -    for (String alias : work.getAliasToWork().keySet()) {
 +    Collection<String> aliasToWork = work.getAliasToWork().keySet();
 +    if (!skipDummy) {
 +      // ConcurrentModification otherwise if adding dummy.
 +      aliasToWork = new ArrayList<>(aliasToWork);
 +    }
 +    for (String alias : aliasToWork) {
        LOG.info("Processing alias " + alias);
  
        // The alias may not have any path
 +      Collection<Map.Entry<Path, ArrayList<String>>> pathToAliases =
 +          work.getPathToAliases().entrySet();
 +      if (!skipDummy) {
 +        // ConcurrentModification otherwise if adding dummy.
 +        pathToAliases = new ArrayList<>(pathToAliases);
 +      }
        boolean isEmptyTable = true;
        boolean hasLogged = false;
 -      // Note: this copies the list because createDummyFileForEmptyPartition may modify the map.
 -      for (Path file : new LinkedList<Path>(work.getPathToAliases().keySet())) {
 +      Path path = null;
 +      for (Map.Entry<Path, ArrayList<String>> e : pathToAliases) {
+         if (lDrvStat != null && lDrvStat.driverState == DriverState.INTERRUPT)
+           throw new IOException("Operation is Canceled. ");
+ 
 -        List<String> aliases = work.getPathToAliases().get(file);
 +        Path file = e.getKey();
 +        List<String> aliases = e.getValue();
          if (aliases.contains(alias)) {
            if (file != null) {
              isEmptyTable = false;

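The Utilities change above snapshots aliasToWork and pathToAliases into fresh lists before iterating (createDummyFileForEmptyPartition may modify the underlying maps mid-loop) and aborts with an IOException once the driver state flips to INTERRUPT. Below is a stripped-down sketch of that copy-then-iterate pattern; the map shape and the interrupt flag are placeholders rather than Hive types:

    import java.io.IOException;
    import java.util.ArrayList;
    import java.util.List;
    import java.util.Map;
    import java.util.concurrent.atomic.AtomicBoolean;

    public class SafeIteration {
      /** Iterates pathToAliases, optionally over a snapshot so the loop body may add new entries. */
      public static void process(Map<String, List<String>> pathToAliases,
                                 AtomicBoolean interrupted,
                                 boolean mayAddEntries) throws IOException {
        Iterable<Map.Entry<String, List<String>>> entries = pathToAliases.entrySet();
        if (mayAddEntries) {
          // Copy first, otherwise adding to the map mid-loop throws ConcurrentModificationException.
          entries = new ArrayList<Map.Entry<String, List<String>>>(pathToAliases.entrySet());
        }
        for (Map.Entry<String, List<String>> e : entries) {
          if (interrupted.get()) {
            throw new IOException("Operation is Canceled.");
          }
          if (mayAddEntries && e.getValue().isEmpty()) {
            // Safe only because we iterate over the snapshot taken above.
            pathToAliases.put(e.getKey() + "_dummy", new ArrayList<String>());
          }
        }
      }
    }
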
http://git-wip-us.apache.org/repos/asf/hive/blob/1ceaf357/ql/src/java/org/apache/hadoop/hive/ql/io/CombineHiveInputFormat.java
----------------------------------------------------------------------
diff --cc ql/src/java/org/apache/hadoop/hive/ql/io/CombineHiveInputFormat.java
index 0b1ac4b,9a7e9d9..b018adb
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/CombineHiveInputFormat.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/CombineHiveInputFormat.java
@@@ -44,7 -43,8 +45,9 @@@ import org.apache.hadoop.conf.Configura
  import org.apache.hadoop.fs.FileSystem;
  import org.apache.hadoop.fs.Path;
  import org.apache.hadoop.fs.PathFilter;
 +import org.apache.hadoop.hive.metastore.MetaStoreUtils;
+ import org.apache.hadoop.hive.ql.Driver.DriverState;
+ import org.apache.hadoop.hive.ql.Driver.LockedDriverState;
  import org.apache.hadoop.hive.ql.exec.Operator;
  import org.apache.hadoop.hive.ql.exec.Utilities;
  import org.apache.hadoop.hive.ql.log.PerfLogger;
@@@ -363,9 -351,13 +366,13 @@@ public class CombineHiveInputFormat<K e
      Map<CombinePathInputFormat, CombineFilter> poolMap =
        new HashMap<CombinePathInputFormat, CombineFilter>();
      Set<Path> poolSet = new HashSet<Path>();
+     LockedDriverState lDrvStat = LockedDriverState.getLockedDriverState();
  
      for (Path path : paths) {
+       if (lDrvStat != null && lDrvStat.driverState == DriverState.INTERRUPT)
+         throw new IOException("Operation is Canceled. ");
+ 
 -      PartitionDesc part = HiveFileFormatUtils.getPartitionDescFromPathRecursively(
 +      PartitionDesc part = HiveFileFormatUtils.getFromPathRecursively(
            pathToPartitionInfo, path, IOPrepareCache.get().allocatePartitionDescMap());
        TableDesc tableDesc = part.getTableDesc();
        if ((tableDesc != null) && tableDesc.isNonNative()) {

http://git-wip-us.apache.org/repos/asf/hive/blob/1ceaf357/ql/src/java/org/apache/hadoop/hive/ql/io/HiveFileFormatUtils.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/1ceaf357/ql/src/java/org/apache/hadoop/hive/ql/io/HiveInputFormat.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/1ceaf357/ql/src/java/org/apache/hadoop/hive/ql/io/rcfile/stats/PartialScanTask.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/1ceaf357/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java
----------------------------------------------------------------------
diff --cc ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java
index d255265,cdf2c40..01e8a48
--- a/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java
@@@ -240,8 -370,8 +370,8 @@@ public final class DbTxnManager extend
            // This is a file or something we don't hold locks for.
            continue;
        }
-       if(t != null && AcidUtils.isFullAcidTable(t)) {
-         compBuilder.setIsAcid(true);
+       if(t != null) {
 -        compBuilder.setIsAcid(AcidUtils.isAcidTable(t));
++        compBuilder.setIsAcid(AcidUtils.isFullAcidTable(t));
        }
        LockComponent comp = compBuilder.build();
        LOG.debug("Adding lock component to lock request " + comp.toString());
@@@ -270,10 -426,9 +426,9 @@@
            break;
  
          case INSERT:
-           t = getTable(output);
+           assert t != null;
 -          if(AcidUtils.isAcidTable(t)) {
 +          if(AcidUtils.isFullAcidTable(t)) {
              compBuilder.setShared();
-             compBuilder.setIsAcid(true);
            }
            else {
              if (conf.getBoolVar(HiveConf.ConfVars.HIVE_TXN_STRICT_LOCKING_MODE)) {
@@@ -307,34 -459,11 +459,11 @@@
          default:
            throw new RuntimeException("Unknown write type " +
                output.getWriteType().toString());
- 
        }
-       switch (output.getType()) {
-         case DATABASE:
-           compBuilder.setDbName(output.getDatabase().getName());
-           break;
- 
-         case TABLE:
-         case DUMMYPARTITION:   // in case of dynamic partitioning lock the table
-           t = output.getTable();
-           compBuilder.setDbName(t.getDbName());
-           compBuilder.setTableName(t.getTableName());
-           break;
- 
-         case PARTITION:
-           compBuilder.setPartitionName(output.getPartition().getName());
-           t = output.getPartition().getTable();
-           compBuilder.setDbName(t.getDbName());
-           compBuilder.setTableName(t.getTableName());
-           break;
- 
-         default:
-           // This is a file or something we don't hold locks for.
-           continue;
-       }
-       if(t != null && AcidUtils.isFullAcidTable(t)) {
-         compBuilder.setIsAcid(true);
+       if(t != null) {
 -        compBuilder.setIsAcid(AcidUtils.isAcidTable(t));
++        compBuilder.setIsAcid(AcidUtils.isFullAcidTable(t));
        }
+ 
        compBuilder.setIsDynamicPartitionWrite(output.isDynamicPartitionWrite());
        LockComponent comp = compBuilder.build();
        LOG.debug("Adding lock component to lock request " + comp.toString());

http://git-wip-us.apache.org/repos/asf/hive/blob/1ceaf357/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
----------------------------------------------------------------------
diff --cc ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
index ea87cb4,5b908e8..6498199
--- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
@@@ -1578,30 -1601,26 +1608,42 @@@ public class Hive 
      return getDatabase(currentDb);
    }
  
-   public void loadSinglePartition(Path loadPath, String tableName,
+   /**
+    * @param loadPath
+    * @param tableName
+    * @param partSpec
+    * @param replace
+    * @param inheritTableSpecs
+    * @param isSkewedStoreAsSubdir
+    * @param isSrcLocal
+    * @param isAcid
+    * @param hasFollowingStatsTask
+    * @return
+    * @throws HiveException
+    */
+   public void loadPartition(Path loadPath, String tableName,
 -      Map<String, String> partSpec, boolean replace,
 -      boolean inheritTableSpecs, boolean isSkewedStoreAsSubdir,
 -      boolean isSrcLocal, boolean isAcid, boolean hasFollowingStatsTask) throws HiveException {
 +      Map<String, String> partSpec, boolean replace, boolean inheritTableSpecs,
 +      boolean isSkewedStoreAsSubdir,  boolean isSrcLocal, boolean isAcid,
 +      boolean hasFollowingStatsTask, Long mmWriteId, boolean isCommitMmWrite)
 +          throws HiveException {
      Table tbl = getTable(tableName);
 +    boolean isMmTableWrite = (mmWriteId != null);
 +    Preconditions.checkState(isMmTableWrite == MetaStoreUtils.isInsertOnlyTable(tbl.getParameters()));
      loadPartition(loadPath, tbl, partSpec, replace, inheritTableSpecs,
 -        isSkewedStoreAsSubdir, isSrcLocal, isAcid, hasFollowingStatsTask);
 +        isSkewedStoreAsSubdir, isSrcLocal, isAcid, hasFollowingStatsTask, mmWriteId);
 +    if (isMmTableWrite && isCommitMmWrite) {
 +      // The assumption behind committing here is that this partition is the only one outputted.
 +      commitMmTableWrite(tbl, mmWriteId);
 +    }
 +  }
 +
- 
 +  public void commitMmTableWrite(Table tbl, Long mmWriteId)
 +      throws HiveException {
 +    try {
 +      getMSC().finalizeTableWrite(tbl.getDbName(), tbl.getTableName(), mmWriteId, true);
 +    } catch (TException e) {
 +      throw new HiveException(e);
 +    }
    }
  
    /**
@@@ -1623,14 -1642,20 +1665,19 @@@
     *          location/inputformat/outputformat/serde details from table spec
     * @param isSrcLocal
     *          If the source directory is LOCAL
-    * @param isAcid true if this is an ACID operation
+    * @param isAcid
+    *          true if this is an ACID operation
+    * @param hasFollowingStatsTask
 +    *          true if there is a following task which updates the stats, so this method need not update them.
+    * @return Partition object being loaded with data
     */
 -  public Partition loadPartition(Path loadPath, Table tbl,
 -      Map<String, String> partSpec, boolean replace,
 -      boolean inheritTableSpecs, boolean isSkewedStoreAsSubdir,
 -      boolean isSrcLocal, boolean isAcid, boolean hasFollowingStatsTask) throws HiveException {
 -
 +  public Partition loadPartition(Path loadPath, Table tbl, Map<String, String> partSpec,
 +      boolean replace, boolean inheritTableSpecs, boolean isSkewedStoreAsSubdir,
 +      boolean isSrcLocal, boolean isAcid, boolean hasFollowingStatsTask, Long mmWriteId)
 +          throws HiveException {
      Path tblDataLocationPath =  tbl.getDataLocation();
      try {
+       // Get the partition object if it already exists
        Partition oldPart = getPartition(tbl, partSpec, false);
        /**
         * Move files before creating the partition since down stream processes
@@@ -1668,41 -1693,21 +1715,45 @@@
        List<Path> newFiles = null;
        PerfLogger perfLogger = SessionState.getPerfLogger();
        perfLogger.PerfLogBegin("MoveTask", "FileMoves");
 -
+       // If config is set, table is not temporary and partition being inserted exists, capture
+       // the list of files added. For not yet existing partitions (insert overwrite to new partition
+       // or dynamic partition inserts), the add partition event will capture the list of files added.
+       if (conf.getBoolVar(ConfVars.FIRE_EVENTS_FOR_DML) && !tbl.isTemporary() && (null != oldPart)) {
+         newFiles = Collections.synchronizedList(new ArrayList<Path>());
+       }
 -
 -      if (replace || (oldPart == null && !isAcid)) {
 -        boolean isAutoPurge = "true".equalsIgnoreCase(tbl.getProperty("auto.purge"));
 -        replaceFiles(tbl.getPath(), loadPath, newPartPath, oldPartPath, getConf(),
 -            isSrcLocal, isAutoPurge, newFiles);
 +      // TODO: this assumes both paths are qualified; which they are, currently.
 +      if (mmWriteId != null && loadPath.equals(newPartPath)) {
 +        // MM insert query, move itself is a no-op.
 +        Utilities.LOG14535.info("not moving " + loadPath + " to " + newPartPath + " (MM)");
 +        assert !isAcid;
 +        if (areEventsForDmlNeeded(tbl, oldPart)) {
 +          newFiles = listFilesCreatedByQuery(loadPath, mmWriteId);
 +        }
 +        Utilities.LOG14535.info("maybe deleting stuff from " + oldPartPath + " (new " + newPartPath + ") for replace");
 +        if (replace && oldPartPath != null) {
-           deleteOldPathForReplace(newPartPath, oldPartPath, getConf(),
++          boolean isAutoPurge = "true".equalsIgnoreCase(tbl.getProperty("auto.purge"));
++          deleteOldPathForReplace(newPartPath, oldPartPath, getConf(), isAutoPurge,
 +              new ValidWriteIds.IdPathFilter(mmWriteId, false, true), mmWriteId != null,
 +              tbl.isStoredAsSubDirectories() ? tbl.getSkewedColNames().size() : 0);
 +        }
        } else {
 -        FileSystem fs = tbl.getDataLocation().getFileSystem(conf);
 -        Hive.copyFiles(conf, loadPath, newPartPath, fs, isSrcLocal, isAcid, newFiles);
 +        // Either a non-MM query, or a load into MM table from an external source.
 +        PathFilter filter = FileUtils.HIDDEN_FILES_PATH_FILTER;
 +        Path destPath = newPartPath;
 +        if (mmWriteId != null) {
 +          // We will load into MM directory, and delete from the parent if needed.
 +          destPath = new Path(destPath, ValidWriteIds.getMmFilePrefix(mmWriteId));
 +          filter = replace ? new ValidWriteIds.IdPathFilter(mmWriteId, false, true) : filter;
 +        }
 +        Utilities.LOG14535.info("moving " + loadPath + " to " + destPath);
 +        if (replace || (oldPart == null && !isAcid)) {
++          boolean isAutoPurge = "true".equalsIgnoreCase(tbl.getProperty("auto.purge"));
 +          replaceFiles(tbl.getPath(), loadPath, destPath, oldPartPath, getConf(),
-               isSrcLocal, filter, mmWriteId != null);
++              isSrcLocal, isAutoPurge, newFiles, filter, mmWriteId != null);
 +        } else {
-           if (areEventsForDmlNeeded(tbl, oldPart)) {
-             newFiles = Collections.synchronizedList(new ArrayList<Path>());
-           }
- 
 +          FileSystem fs = tbl.getDataLocation().getFileSystem(conf);
 +          Hive.copyFiles(conf, loadPath, destPath, fs, isSrcLocal, isAcid, newFiles);
 +        }
        }
        perfLogger.PerfLogEnd("MoveTask", "FileMoves");
        Partition newTPart = oldPart != null ? oldPart : new Partition(tbl, partSpec, newPartPath);
@@@ -1774,54 -1783,6 +1829,54 @@@
      }
    }
  
 +
 +  private boolean areEventsForDmlNeeded(Table tbl, Partition oldPart) {
 +    return conf.getBoolVar(ConfVars.FIRE_EVENTS_FOR_DML) && !tbl.isTemporary() && oldPart != null;
 +  }
 +
 +  private List<Path> listFilesCreatedByQuery(Path loadPath, long mmWriteId) throws HiveException {
 +    List<Path> newFiles = new ArrayList<Path>();
 +    final String filePrefix = ValidWriteIds.getMmFilePrefix(mmWriteId);
 +    FileStatus[] srcs;
 +    FileSystem srcFs;
 +    try {
 +      srcFs = loadPath.getFileSystem(conf);
 +      srcs = srcFs.listStatus(loadPath);
 +    } catch (IOException e) {
 +      LOG.error("Error listing files", e);
 +      throw new HiveException(e);
 +    }
 +    if (srcs == null) {
 +      LOG.info("No sources specified: " + loadPath);
 +      return newFiles;
 +    }
 +    PathFilter subdirFilter = null;
-  
++
 +    // TODO: just like the move path, we only do one level of recursion.
 +    for (FileStatus src : srcs) {
 +      if (src.isDirectory()) {
 +        if (subdirFilter == null) {
 +          subdirFilter = new PathFilter() {
 +            @Override
 +            public boolean accept(Path path) {
 +              return path.getName().startsWith(filePrefix);
 +            }
 +          };
 +        }
 +        try {
 +          for (FileStatus srcFile : srcFs.listStatus(src.getPath(), subdirFilter)) {
 +            newFiles.add(srcFile.getPath());
 +          }
 +        } catch (IOException e) {
 +          throw new HiveException(e);
 +        }
 +      } else if (src.getPath().getName().startsWith(filePrefix)) {
 +        newFiles.add(src.getPath());
 +      }
 +    }
 +    return newFiles;
 +  }
 +
    private void setStatsPropAndAlterPartition(boolean hasFollowingStatsTask, Table tbl,
        Partition newTPart) throws MetaException, TException {
      EnvironmentContext environmentContext = null;
@@@ -2153,36 -2067,17 +2208,38 @@@ private void constructOneLBLocationMap(
      if (conf.getBoolVar(ConfVars.FIRE_EVENTS_FOR_DML) && !tbl.isTemporary()) {
        newFiles = Collections.synchronizedList(new ArrayList<Path>());
      }
 -    if (replace) {
 -      Path tableDest = tbl.getPath();
 -      boolean isAutopurge = "true".equalsIgnoreCase(tbl.getProperty("auto.purge"));
 -      replaceFiles(tableDest, loadPath, tableDest, tableDest, sessionConf, isSrcLocal, isAutopurge, newFiles);
 +    // TODO: this assumes both paths are qualified; which they are, currently.
 +    if (mmWriteId != null && loadPath.equals(tbl.getPath())) {
 +      Utilities.LOG14535.info("not moving " + loadPath + " to " + tbl.getPath());
 +      if (replace) {
 +        Path tableDest = tbl.getPath();
-         deleteOldPathForReplace(tableDest, tableDest, sessionConf,
++        boolean isAutopurge = "true".equalsIgnoreCase(tbl.getProperty("auto.purge"));
++        deleteOldPathForReplace(tableDest, tableDest, sessionConf, isAutopurge,
 +            new ValidWriteIds.IdPathFilter(mmWriteId, false, true), mmWriteId != null,
 +            tbl.isStoredAsSubDirectories() ? tbl.getSkewedColNames().size() : 0);
 +      }
 +      newFiles = listFilesCreatedByQuery(loadPath, mmWriteId);
      } else {
 -      FileSystem fs;
 -      try {
 -        fs = tbl.getDataLocation().getFileSystem(sessionConf);
 -        copyFiles(sessionConf, loadPath, tbl.getPath(), fs, isSrcLocal, isAcid, newFiles);
 -      } catch (IOException e) {
 -        throw new HiveException("addFiles: filesystem error in check phase", e);
 +      // Either a non-MM query, or a load into MM table from an external source.
 +      Path tblPath = tbl.getPath(), destPath = tblPath;
 +      PathFilter filter = FileUtils.HIDDEN_FILES_PATH_FILTER;
 +      if (mmWriteId != null) {
 +        // We will load into MM directory, and delete from the parent if needed.
 +        destPath = new Path(destPath, ValidWriteIds.getMmFilePrefix(mmWriteId));
 +        filter = replace ? new ValidWriteIds.IdPathFilter(mmWriteId, false, true) : filter;
 +      }
 +      Utilities.LOG14535.info("moving " + loadPath + " to " + tblPath + " (replace = " + replace + ")");
 +      if (replace) {
++        boolean isAutopurge = "true".equalsIgnoreCase(tbl.getProperty("auto.purge"));
 +        replaceFiles(tblPath, loadPath, destPath, tblPath,
-             sessionConf, isSrcLocal, filter, mmWriteId != null);
++            sessionConf, isSrcLocal, isAutopurge, newFiles, filter, mmWriteId != null);
 +      } else {
 +        try {
 +          FileSystem fs = tbl.getDataLocation().getFileSystem(sessionConf);
 +          copyFiles(sessionConf, loadPath, destPath, fs, isSrcLocal, isAcid, newFiles);
 +        } catch (IOException e) {
 +          throw new HiveException("addFiles: filesystem error in check phase", e);
 +        }
        }
      }
      if (!this.getConf().getBoolVar(HiveConf.ConfVars.HIVESTATSAUTOGATHER)) {
@@@ -2217,11 -2112,7 +2274,11 @@@
        throw new HiveException(e);
      }
  
 +    if (mmWriteId != null) {
 +      commitMmTableWrite(tbl, mmWriteId);
 +    }
 +
-     fireInsertEvent(tbl, null, newFiles);
+     fireInsertEvent(tbl, null, replace, newFiles);
    }
  
    /**
@@@ -3270,21 -3182,17 +3347,18 @@@
        throw new HiveException(e.getMessage(), e);
      }
  
-     //needed for perm inheritance.
-     final boolean inheritPerms = HiveConf.getBoolVar(conf,
-         HiveConf.ConfVars.HIVE_WAREHOUSE_SUBDIR_INHERIT_PERMS);
      HdfsUtils.HadoopFileStatus destStatus = null;
  
 -    // If source path is a subdirectory of the destination path:
 +    // If source path is a subdirectory of the destination path (or the other way around):
      //   ex: INSERT OVERWRITE DIRECTORY 'target/warehouse/dest4.out' SELECT src.value WHERE src.key >= 300;
      //   where the staging directory is a subdirectory of the destination directory
      // (1) Do not delete the dest dir before doing the move operation.
      // (2) It is assumed that subdir and dir are in same encryption zone.
      // (3) Move individual files from scr dir to dest dir.
 -    boolean destIsSubDir = isSubDir(srcf, destf, srcFs, destFs, isSrcLocal);
 +    boolean srcIsSubDirOfDest = isSubDir(srcf, destf, srcFs, destFs, isSrcLocal),
 +        destIsSubDirOfSrc = isSubDir(destf, srcf, destFs, srcFs, false);
      try {
-       if (inheritPerms || replace) {
+       if (replace) {
          try{
            destStatus = new HdfsUtils.HadoopFileStatus(conf, destFs, destf);
            //if destf is an existing directory:
@@@ -3571,11 -3455,15 +3625,16 @@@
     * @param oldPath
    *          The directory of the old data location that needs to be cleaned up. Most of the time this will
    *          be the same as destf, unless it is across FileSystem boundaries.
+    * @param purge
 +    *          When set to true, files which need to be deleted are not moved to Trash
     * @param isSrcLocal
     *          If the source directory is LOCAL
+    * @param newFiles
+    *          Output the list of new files replaced in the destination path
     */
    protected void replaceFiles(Path tablePath, Path srcf, Path destf, Path oldPath, HiveConf conf,
-           boolean isSrcLocal, PathFilter deletePathFilter, boolean isMmTable) throws HiveException {
 -          boolean isSrcLocal, boolean purge, List<Path> newFiles) throws HiveException {
++          boolean isSrcLocal, boolean purge, List<Path> newFiles, PathFilter deletePathFilter,
++          boolean isMmTable) throws HiveException {
      try {
  
        FileSystem destFs = destf.getFileSystem(conf);
@@@ -3594,9 -3482,37 +3653,9 @@@
        }
  
        if (oldPath != null) {
 -        boolean oldPathDeleted = false;
 -        boolean isOldPathUnderDestf = false;
 -        FileStatus[] statuses = null;
 -        try {
 -          FileSystem oldFs = oldPath.getFileSystem(conf);
 -          statuses = oldFs.listStatus(oldPath, FileUtils.HIDDEN_FILES_PATH_FILTER);
 -          // Do not delete oldPath if:
 -          //  - destf is subdir of oldPath
 -          isOldPathUnderDestf = isSubDir(oldPath, destf, oldFs, destFs, false);
 -          if (isOldPathUnderDestf) {
 -            // if oldPath is destf or its subdir, its should definitely be deleted, otherwise its
 -            // existing content might result in incorrect (extra) data.
 -            // But not sure why we changed not to delete the oldPath in HIVE-8750 if it is
 -            // not the destf or its subdir?
 -            oldPathDeleted = trashFiles(oldFs, statuses, conf, purge);
 -          }
 -        } catch (IOException e) {
 -          if (isOldPathUnderDestf) {
 -            // if oldPath is a subdir of destf but it could not be cleaned
 -            throw new HiveException("Directory " + oldPath.toString()
 -                + " could not be cleaned up.", e);
 -          } else {
 -            //swallow the exception since it won't affect the final result
 -            LOG.warn("Directory " + oldPath.toString() + " cannot be cleaned: " + e, e);
 -          }
 -        }
 -        if (statuses != null && statuses.length > 0) {
 -          if (isOldPathUnderDestf && !oldPathDeleted) {
 -            throw new HiveException("Destination directory " + destf + " has not be cleaned up.");
 -          }
 -        }
 +        // TODO: we assume lbLevels is 0 here. Same as old code for non-MM.
 +        //       For MM tables, this can only be a LOAD command. Does LOAD even support LB?
-         deleteOldPathForReplace(destf, oldPath, conf, deletePathFilter, isMmTable, 0);
++        deleteOldPathForReplace(destf, oldPath, conf, purge, deletePathFilter, isMmTable, 0);
        }
  
        // first call FileUtils.mkdir to make sure that destf directory exists, if not, it creates
@@@ -3631,69 -3557,6 +3700,69 @@@
      }
    }
  
-   private void deleteOldPathForReplace(Path destPath, Path oldPath, HiveConf conf,
++  private void deleteOldPathForReplace(Path destPath, Path oldPath, HiveConf conf, boolean purge,
 +      PathFilter pathFilter, boolean isMmTable, int lbLevels) throws HiveException {
 +    Utilities.LOG14535.info("Deleting old paths for replace in " + destPath + " and old path " + oldPath);
 +    boolean isOldPathUnderDestf = false;
 +    try {
 +      FileSystem oldFs = oldPath.getFileSystem(conf);
 +      FileSystem destFs = destPath.getFileSystem(conf);
 +      // if oldPath is destf or its subdir, it should definitely be deleted, otherwise its
 +      // existing content might result in incorrect (extra) data.
 +      // But not sure why we changed not to delete the oldPath in HIVE-8750 if it is
 +      // not the destf or its subdir?
 +      isOldPathUnderDestf = isSubDir(oldPath, destPath, oldFs, destFs, false);
 +      if (isOldPathUnderDestf || isMmTable) {
 +        if (lbLevels == 0 || !isMmTable) {
-           cleanUpOneDirectoryForReplace(oldPath, oldFs, pathFilter, conf);
++          cleanUpOneDirectoryForReplace(oldPath, oldFs, pathFilter, conf, purge);
 +        } else {
 +          // We need to clean up different MM IDs from each LB directory separately.
 +          // Avoid temporary directories in the immediate table/part dir.
 +          // TODO: we could just find directories with any MM directories inside?
 +          //       the rest doesn't have to be cleaned up.
 +          String mask = "[^._]*";
 +          for (int i = 0; i < lbLevels - 1; ++i) {
 +            mask += Path.SEPARATOR + "*";
 +          }
 +          Path glob = new Path(oldPath, mask);
 +          FileStatus[] lbDirs = oldFs.globStatus(glob);
 +          for (FileStatus lbDir : lbDirs) {
 +            Path lbPath = lbDir.getPath();
 +            if (!lbDir.isDirectory()) {
 +              throw new HiveException("Unexpected path during overwrite: " + lbPath);
 +            }
 +            Utilities.LOG14535.info("Cleaning up LB directory " + lbPath);
-             cleanUpOneDirectoryForReplace(lbPath, oldFs, pathFilter, conf);
++            cleanUpOneDirectoryForReplace(lbPath, oldFs, pathFilter, conf, purge);
 +          }
 +        }
 +      }
 +    } catch (IOException e) {
 +      if (isOldPathUnderDestf || isMmTable) {
 +        // if oldPath is a subdir of destf but it could not be cleaned
 +        throw new HiveException("Directory " + oldPath.toString()
 +            + " could not be cleaned up.", e);
 +      } else {
 +        //swallow the exception since it won't affect the final result
 +        LOG.warn("Directory " + oldPath.toString() + " cannot be cleaned: " + e, e);
 +      }
 +    }
 +  }
 +
 +
 +  private void cleanUpOneDirectoryForReplace(Path path, FileSystem fs,
-       PathFilter pathFilter, HiveConf conf) throws IOException, HiveException {
++      PathFilter pathFilter, HiveConf conf, boolean purge) throws IOException, HiveException {
 +    FileStatus[] statuses = fs.listStatus(path, pathFilter);
 +    if (statuses == null || statuses.length == 0) return;
 +    String s = "Deleting files under " + path + " for replace: ";
 +    for (FileStatus file : statuses) {
 +      s += file.getPath().getName() + ", ";
 +    }
 +    Utilities.LOG14535.info(s);
-     if (!trashFiles(fs, statuses, conf)) {
++    if (!trashFiles(fs, statuses, conf, purge)) {
 +      throw new HiveException("Old path " + path + " has not been cleaned up.");
 +    }
 +  }
 +
  
    /**
     * Trashes or deletes all files under a directory. Leaves the directory as is.

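listFilesCreatedByQuery() above collects the files an MM (insert-only) write produced by matching a per-write-id file name prefix, descending at most one directory level, mirroring the move path. Below is a simplified sketch of that prefix scan; the prefix helper is a stand-in, since the exact ValidWriteIds.getMmFilePrefix() format is not shown in this diff:

    import java.io.IOException;
    import java.util.ArrayList;
    import java.util.List;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.PathFilter;

    public class MmFileScanner {
      /** Placeholder for ValidWriteIds.getMmFilePrefix(mmWriteId); the real prefix format may differ. */
      static String mmFilePrefix(long mmWriteId) {
        return "mm_" + mmWriteId;
      }

      /** Lists files created by the given MM write id, recursing at most one directory level. */
      public static List<Path> listFilesForWriteId(Configuration conf, Path loadPath, long mmWriteId)
          throws IOException {
        final String prefix = mmFilePrefix(mmWriteId);
        FileSystem fs = loadPath.getFileSystem(conf);
        List<Path> result = new ArrayList<Path>();
        FileStatus[] srcs = fs.listStatus(loadPath);
        if (srcs == null) {
          return result;
        }
        PathFilter prefixFilter = new PathFilter() {
          @Override
          public boolean accept(Path p) {
            return p.getName().startsWith(prefix);
          }
        };
        for (FileStatus src : srcs) {
          if (src.isDirectory()) {
            for (FileStatus f : fs.listStatus(src.getPath(), prefixFilter)) {
              result.add(f.getPath());
            }
          } else if (src.getPath().getName().startsWith(prefix)) {
            result.add(src.getPath());
          }
        }
        return result;
      }
    }
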
http://git-wip-us.apache.org/repos/asf/hive/blob/1ceaf357/ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/1ceaf357/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/1ceaf357/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/GenMRSkewJoinProcessor.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/1ceaf357/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/GenSparkSkewJoinProcessor.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/1ceaf357/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/Vectorizer.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/1ceaf357/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/1ceaf357/ql/src/java/org/apache/hadoop/hive/ql/parse/EximUtil.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/1ceaf357/ql/src/java/org/apache/hadoop/hive/ql/parse/ExportSemanticAnalyzer.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/1ceaf357/ql/src/java/org/apache/hadoop/hive/ql/parse/GenTezUtils.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/1ceaf357/ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java
----------------------------------------------------------------------
diff --cc ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java
index f4fe6ac,dc86942..99a7392
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java
@@@ -63,9 -58,9 +62,11 @@@ import org.apache.hadoop.hive.ql.metada
  import org.apache.hadoop.hive.ql.metadata.HiveException;
  import org.apache.hadoop.hive.ql.metadata.InvalidTableException;
  import org.apache.hadoop.hive.ql.metadata.Table;
+ import org.apache.hadoop.hive.ql.parse.repl.load.MetaData;
  import org.apache.hadoop.hive.ql.plan.AddPartitionDesc;
 +import org.apache.hadoop.hive.ql.plan.CopyWork;
 +import org.apache.hadoop.hive.ql.plan.CreateTableDesc;
+ import org.apache.hadoop.hive.ql.plan.ImportTableDesc;
  import org.apache.hadoop.hive.ql.plan.DDLWork;
  import org.apache.hadoop.hive.ql.plan.DropTableDesc;
  import org.apache.hadoop.hive.ql.plan.LoadTableDesc;
@@@ -217,8 -211,12 +218,13 @@@ public class ImportSemanticAnalyzer ext
  
      // Create table associated with the import
      // Executed if relevant, and used to contain all the other details about the table if not.
-     CreateTableDesc tblDesc = getBaseCreateTableDescFromTable(dbname,rv.getTable());
+     ImportTableDesc tblDesc;
+     try {
+       tblDesc = getBaseCreateTableDescFromTable(dbname, rv.getTable());
+     } catch (Exception e) {
+       throw new HiveException(e);
+     }
 +    boolean isSourceMm = MetaStoreUtils.isInsertOnlyTable(tblDesc.getTblProps());
  
      if ((replicationSpec!= null) && replicationSpec.isInReplicationScope()){
        tblDesc.setReplicationSpec(replicationSpec);
@@@ -297,18 -292,7 +303,22 @@@
        tableExists = true;
      }
  
 -    if (!replicationSpec.isInReplicationScope()){
 +    Long mmWriteId = null;
 +    if (table != null && MetaStoreUtils.isInsertOnlyTable(table.getParameters())) {
 +      mmWriteId = x.getHive().getNextTableWriteId(table.getDbName(), table.getTableName());
 +    } else if (table == null && isSourceMm) {
 +      // We could import everything as is - directories and IDs, but that won't work with ACID
 +      // txn ids in future. So, let's import everything into the new MM directory with ID == 0.
 +      mmWriteId = 0l;
 +    }
++    // TODO: due to the master merge on May 4, tblDesc has been changed from CreateTableDesc to ImportTableDesc,
++    // which may result in Import test failures
++    /*
 +    if (mmWriteId != null) {
 +      tblDesc.setInitialMmWriteId(mmWriteId);
 +    }
++    */
 +    if (!replicationSpec.isInReplicationScope()) {
        createRegularImportTasks(
            tblDesc, partitionDescs,
            isPartSpecSet, replicationSpec, table,
@@@ -455,10 -386,9 +431,10 @@@
      ), x.getConf());
    }
  
-  private static Task<?> addSinglePartition(URI fromURI, FileSystem fs, CreateTableDesc tblDesc,
+  private static Task<?> addSinglePartition(URI fromURI, FileSystem fs, ImportTableDesc tblDesc,
 -      Table table, Warehouse wh,
 -      AddPartitionDesc addPartitionDesc, ReplicationSpec replicationSpec, EximUtil.SemanticAnalyzerWrapperContext x)
 +      Table table, Warehouse wh, AddPartitionDesc addPartitionDesc, ReplicationSpec replicationSpec,
 +      EximUtil.SemanticAnalyzerWrapperContext x, Long mmWriteId, boolean isSourceMm,
 +      Task<?> commitTask)
        throws MetaException, IOException, HiveException {
      AddPartitionDesc.OnePartitionDesc partSpec = addPartitionDesc.getPartition(0);
      if (tblDesc.isExternal() && tblDesc.getLocation() == null) {
@@@ -476,39 -405,18 +452,39 @@@
            + partSpecToString(partSpec.getPartSpec())
            + " with source location: " + srcLocation);
        Path tgtLocation = new Path(partSpec.getLocation());
 -      Path tmpPath = x.getCtx().getExternalTmpPath(tgtLocation);
 -      Task<?> copyTask = ReplCopyTask.getLoadCopyTask(
 -          replicationSpec, new Path(srcLocation), tmpPath, x.getConf());
 +      Path destPath = mmWriteId == null ? x.getCtx().getExternalTmpPath(tgtLocation)
 +          : new Path(tgtLocation, ValidWriteIds.getMmFilePrefix(mmWriteId));
 +      Path moveTaskSrc =  mmWriteId == null ? destPath : tgtLocation;
 +      Utilities.LOG14535.info("adding import work for partition with source location: "
 +          + srcLocation + "; target: " + tgtLocation + "; copy dest " + destPath + "; mm "
 +          + mmWriteId + " (src " + isSourceMm + ") for " + partSpecToString(partSpec.getPartSpec()));
 +
 +
 +      Task<?> copyTask = null;
 +      if (replicationSpec.isInReplicationScope()) {
 +        if (isSourceMm || mmWriteId != null) {
 +          // TODO: ReplCopyTask is completely screwed. Need to support when it's not as screwed.
 +          throw new RuntimeException(
 +              "Not supported right now because Replication is completely screwed");
 +        }
 +        copyTask = ReplCopyTask.getLoadCopyTask(
 +            replicationSpec, new Path(srcLocation), destPath, x.getConf());
 +      } else {
 +        CopyWork cw = new CopyWork(new Path(srcLocation), destPath, false);
 +        cw.setSkipSourceMmDirs(isSourceMm);
 +        copyTask = TaskFactory.get(cw, x.getConf());
 +      }
 +
        Task<?> addPartTask = TaskFactory.get(new DDLWork(x.getInputs(),
            x.getOutputs(), addPartitionDesc), x.getConf());
 -      LoadTableDesc loadTableWork = new LoadTableDesc(tmpPath,
 -          Utilities.getTableDesc(table),
 -          partSpec.getPartSpec(), replicationSpec.isReplace());
 +      LoadTableDesc loadTableWork = new LoadTableDesc(moveTaskSrc, Utilities.getTableDesc(table),
-           partSpec.getPartSpec(), true, mmWriteId);
++          partSpec.getPartSpec(), replicationSpec.isReplace(), mmWriteId);
        loadTableWork.setInheritTableSpecs(false);
 +      // Do not commit the write ID from each task; need to commit once.
 +      // TODO: we should just change the import to use a single MoveTask, like dynparts.
 +      loadTableWork.setIntermediateInMmWrite(mmWriteId != null);
        Task<?> loadPartTask = TaskFactory.get(new MoveWork(
 -          x.getInputs(), x.getOutputs(), loadTableWork, null, false),
 -          x.getConf());
 +          x.getInputs(), x.getOutputs(), loadTableWork, null, false), x.getConf());
        copyTask.addDependentTask(loadPartTask);
        addPartTask.addDependentTask(loadPartTask);
        x.getTasks().add(copyTask);
@@@ -800,16 -707,16 +776,16 @@@
     * @param wh
     */
    private static void createRegularImportTasks(
-       CreateTableDesc tblDesc, List<AddPartitionDesc> partitionDescs, boolean isPartSpecSet,
 -      ImportTableDesc tblDesc,
 -      List<AddPartitionDesc> partitionDescs,
 -      boolean isPartSpecSet,
 -      ReplicationSpec replicationSpec,
 -      Table table, URI fromURI, FileSystem fs, Warehouse wh, EximUtil.SemanticAnalyzerWrapperContext x)
++      ImportTableDesc tblDesc, List<AddPartitionDesc> partitionDescs, boolean isPartSpecSet,
 +      ReplicationSpec replicationSpec, Table table, URI fromURI, FileSystem fs, Warehouse wh,
 +      EximUtil.SemanticAnalyzerWrapperContext x, Long mmWriteId, boolean isSourceMm)
        throws HiveException, URISyntaxException, IOException, MetaException {
  
 -    if (table != null){
 +    if (table != null) {
        if (table.isPartitioned()) {
          x.getLOG().debug("table partitioned");
 +        Task<?> ict = createImportCommitTask(
 +            table.getDbName(), table.getTableName(), mmWriteId, x.getConf());
  
          for (AddPartitionDesc addPartitionDesc : partitionDescs) {
            Map<String, String> partSpec = addPartitionDesc.getPartition(0).getPartSpec();
@@@ -1026,8 -920,7 +1001,8 @@@
          }
          if (!replicationSpec.isMetadataOnly()) {
            // repl-imports are replace-into unless the event is insert-into
-           loadTable(fromURI, table, !replicationSpec.isInsert(), new Path(fromURI),
 -          loadTable(fromURI, table, replicationSpec.isReplace(), new Path(fromURI), replicationSpec, x);
++          loadTable(fromURI, table, replicationSpec.isReplace(), new Path(fromURI),
 +            replicationSpec, x, mmWriteId, isSourceMm);
          } else {
            x.getTasks().add(alterTableTask(tblDesc, x, replicationSpec));
          }

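addSinglePartition() above wires each imported partition as a copy task feeding a load task, with the add-partition DDL task also gating the load, and defers the MM write-id commit to a single shared task instead of committing per partition. Below is a toy sketch of that dependency wiring; ToyTask is a hypothetical stand-in, not Hive's Task API:

    import java.util.ArrayList;
    import java.util.List;

    public class ToyTask {
      final String name;
      final List<ToyTask> dependents = new ArrayList<ToyTask>();

      ToyTask(String name) { this.name = name; }

      void addDependentTask(ToyTask t) { dependents.add(t); }

      public static void main(String[] args) {
        // One copy + load pair per partition; a single commit task runs after every load.
        ToyTask commit = new ToyTask("commitMmWriteId");
        for (String part : new String[] {"ds=2017-05-01", "ds=2017-05-02"}) {
          ToyTask copy = new ToyTask("copy " + part);
          ToyTask addPart = new ToyTask("addPartition " + part);
          ToyTask load = new ToyTask("loadPartition " + part);
          copy.addDependentTask(load);      // the load waits for the copied files
          addPart.addDependentTask(load);   // and for the partition metadata
          load.addDependentTask(commit);    // the write id is committed once, after all loads
          System.out.println(copy.name + " / " + addPart.name + " -> " + load.name + " -> " + commit.name);
        }
      }
    }
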
http://git-wip-us.apache.org/repos/asf/hive/blob/1ceaf357/ql/src/java/org/apache/hadoop/hive/ql/parse/ParseContext.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/1ceaf357/ql/src/java/org/apache/hadoop/hive/ql/parse/ProcessAnalyzeTable.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/1ceaf357/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
----------------------------------------------------------------------
diff --cc ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
index 34f2ac4,5115fc8..1bd4f26
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
@@@ -7158,14 -7259,22 +7249,14 @@@ public class SemanticAnalyzer extends B
      } else if (dpCtx != null) {
        fileSinkDesc.setStaticSpec(dpCtx.getSPPath());
      }
 +    return fileSinkDesc;
 +  }
  
 -    if (isHiveServerQuery &&
 -      null != table_desc &&
 -      table_desc.getSerdeClassName().equalsIgnoreCase(ThriftJDBCBinarySerDe.class.getName()) &&
 -      HiveConf.getBoolVar(conf,HiveConf.ConfVars.HIVE_SERVER2_THRIFT_RESULTSET_SERIALIZE_IN_TASKS)) {
 -        fileSinkDesc.setIsUsingThriftJDBCBinarySerDe(true);
 -    } else {
 -        fileSinkDesc.setIsUsingThriftJDBCBinarySerDe(false);
 -    }
 -
 -    Operator output = putOpInsertMap(OperatorFactory.getAndMakeChild(
 -        fileSinkDesc, fsRS, input), inputRR);
 -
 +  private void handleLineage(LoadTableDesc ltd, Operator output)
 +      throws SemanticException {
      if (ltd != null && SessionState.get() != null) {
        SessionState.get().getLineageState()
-           .mapDirToOp(ltd.getSourcePath(), (FileSinkOperator) output);
+           .mapDirToOp(ltd.getSourcePath(), output);
      } else if ( queryState.getCommandType().equals(HiveOperation.CREATETABLE_AS_SELECT.getOperationName())) {
  
        Path tlocation = null;
@@@ -7178,51 -7287,30 +7269,51 @@@
        }
  
        SessionState.get().getLineageState()
-               .mapDirToOp(tlocation, (FileSinkOperator) output);
+               .mapDirToOp(tlocation, output);
      }
 +  }
  
 -    if (LOG.isDebugEnabled()) {
 -      LOG.debug("Created FileSink Plan for clause: " + dest + "dest_path: "
 -          + dest_path + " row schema: " + inputRR.toString());
 +  private WriteEntity generateTableWriteEntity(String dest, Table dest_tab,
 +      Map<String, String> partSpec, LoadTableDesc ltd,
 +      DynamicPartitionCtx dpCtx, boolean isNonNativeTable)
 +      throws SemanticException {
 +    WriteEntity output = null;
 +
 +    // Here we only register the whole table for the post-exec hook if no DP is present;
 +    // in the case of DP, we will register the WriteEntity in MoveTask when the
 +    // list of dynamically created partitions is known.
 +    if ((dpCtx == null || dpCtx.getNumDPCols() == 0)) {
 +      output = new WriteEntity(dest_tab, determineWriteType(ltd, isNonNativeTable, dest));
 +      if (!outputs.add(output)) {
 +        throw new SemanticException(ErrorMsg.OUTPUT_SPECIFIED_MULTIPLE_TIMES
 +            .getMsg(dest_tab.getTableName()));
 +      }
      }
  
 -    FileSinkOperator fso = (FileSinkOperator) output;
 -    fso.getConf().setTable(dest_tab);
 -    fsopToTable.put(fso, dest_tab);
 -    // the following code is used to collect column stats when
 -    // hive.stats.autogather=true
 -    // and it is an insert overwrite or insert into table
 -    if (dest_tab != null && conf.getBoolVar(ConfVars.HIVESTATSAUTOGATHER)
 -        && conf.getBoolVar(ConfVars.HIVESTATSCOLAUTOGATHER)
 -        && ColumnStatsAutoGatherContext.canRunAutogatherStats(fso)) {
 -      if (dest_type.intValue() == QBMetaData.DEST_TABLE) {
 -        genAutoColumnStatsGatheringPipeline(qb, table_desc, partSpec, input, qb.getParseInfo()
 -            .isInsertIntoTable(dest_tab.getDbName(), dest_tab.getTableName()));
 -      } else if (dest_type.intValue() == QBMetaData.DEST_PARTITION) {
 -        genAutoColumnStatsGatheringPipeline(qb, table_desc, dest_part.getSpec(), input, qb
 -            .getParseInfo().isInsertIntoTable(dest_tab.getDbName(), dest_tab.getTableName()));
 -
 +    if ((dpCtx != null) && (dpCtx.getNumDPCols() >= 0)) {
 +      // No static partition specified
 +      if (dpCtx.getNumSPCols() == 0) {
 +        output = new WriteEntity(dest_tab, determineWriteType(ltd, isNonNativeTable, dest), false);
 +        outputs.add(output);
 +        output.setDynamicPartitionWrite(true);
 +      }
 +      // part of the partition specified
 +      // Create a DummyPartition in this case. Since the metastore does not store partial
 +      // partitions currently, we need to store dummy partitions
 +      else {
 +        try {
 +          String ppath = dpCtx.getSPPath();
 +          ppath = ppath.substring(0, ppath.length() - 1);
 +          DummyPartition p =
 +              new DummyPartition(dest_tab, dest_tab.getDbName()
 +                  + "@" + dest_tab.getTableName() + "@" + ppath,
 +                  partSpec);
 +          output = new WriteEntity(p, getWriteType(dest), false);
 +          output.setDynamicPartitionWrite(true);
 +          outputs.add(output);
 +        } catch (HiveException e) {
 +          throw new SemanticException(e.getMessage(), e);
 +        }
        }
      }
      return output;
@@@ -7329,11 -7357,11 +7420,11 @@@
    // This method assumes you have already decided that this is an Acid write.  Don't call it if
    // that isn't true.
    private void checkAcidConstraints(QB qb, TableDesc tableDesc,
 -                                    Table table) throws SemanticException {
 +                                    Table table, AcidUtils.Operation acidOp) throws SemanticException {
      String tableName = tableDesc.getTableName();
 -    if (!qb.getParseInfo().isInsertIntoTable(tableName)) {
 +    if (!qb.getParseInfo().isInsertIntoTable(tableName) && !Operation.INSERT_ONLY.equals(acidOp)) {
        LOG.debug("Couldn't find table " + tableName + " in insertIntoTable");
-       throw new SemanticException(ErrorMsg.NO_INSERT_OVERWRITE_WITH_ACID.getMsg());
+       throw new SemanticException(ErrorMsg.NO_INSERT_OVERWRITE_WITH_ACID, tableName);
      }
      /*
      LOG.info("Modifying config values for ACID write");

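generateTableWriteEntity() above registers the query output at different granularities depending on the partition spec: the whole table when no dynamic partitioning is involved, a table-level dynamic write when the spec is fully dynamic, and a DummyPartition when only part of the spec is static. Below is a compressed and slightly simplified sketch of that branching, using stand-in types rather than Hive's entity classes:

    public class OutputRegistrationSketch {
      enum Kind { TABLE, DYNAMIC_TABLE_WRITE, DUMMY_PARTITION }

      /** Mirrors the branching in generateTableWriteEntity in simplified form. */
      static Kind classify(boolean hasDpCtx, int numDpCols, int numSpCols) {
        if (!hasDpCtx || numDpCols == 0) {
          return Kind.TABLE;                // no DP: register the whole table up front
        }
        if (numSpCols == 0) {
          return Kind.DYNAMIC_TABLE_WRITE;  // fully dynamic spec: table-level dynamic write
        }
        return Kind.DUMMY_PARTITION;        // partial static spec: metastore cannot store it, use a dummy
      }
    }
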
http://git-wip-us.apache.org/repos/asf/hive/blob/1ceaf357/ql/src/java/org/apache/hadoop/hive/ql/parse/TaskCompiler.java
----------------------------------------------------------------------
diff --cc ql/src/java/org/apache/hadoop/hive/ql/parse/TaskCompiler.java
index 91c343c,08a8f00..6629a0c
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/TaskCompiler.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/TaskCompiler.java
@@@ -316,80 -364,6 +316,80 @@@ public abstract class TaskCompiler 
      }
    }
  
 +  private void setLoadFileLocation(
 +      final ParseContext pCtx, LoadFileDesc lfd) throws SemanticException {
 +    // CTAS; make the movetask's destination directory the table's destination.
 +    Long mmWriteIdForCtas = null;
 +    FileSinkDesc dataSinkForCtas = null;
 +    String loc = null;
 +    if (pCtx.getQueryProperties().isCTAS()) {
 +      CreateTableDesc ctd = pCtx.getCreateTable();
 +      dataSinkForCtas = ctd.getAndUnsetWriter();
 +      mmWriteIdForCtas = ctd.getInitialMmWriteId();
 +      loc = ctd.getLocation();
 +    } else {
 +      loc = pCtx.getCreateViewDesc().getLocation();
 +    }
 +    Path location = (loc == null) ? getDefaultCtasLocation(pCtx) : new Path(loc);
 +    if (mmWriteIdForCtas != null) {
 +      dataSinkForCtas.setDirName(location);
 +      location = new Path(location, ValidWriteIds.getMmFilePrefix(mmWriteIdForCtas));
 +      lfd.setSourcePath(location);
 +      Utilities.LOG14535.info("Setting MM CTAS to  " + location);
 +    }
 +    Utilities.LOG14535.info("Location for LFD is being set to " + location + "; moving from " + lfd.getSourcePath());
 +    lfd.setTargetDir(location);
 +  }
 +
 +  private void createColumnStatsTasks(final ParseContext pCtx,
 +      final List<Task<? extends Serializable>> rootTasks,
 +      List<LoadFileDesc> loadFileWork, boolean isCStats, int outerQueryLimit)
 +      throws SemanticException {
 +    Set<Task<? extends Serializable>> leafTasks = new LinkedHashSet<Task<? extends Serializable>>();
 +    getLeafTasks(rootTasks, leafTasks);
 +    if (isCStats) {
 +      genColumnStatsTask(pCtx.getAnalyzeRewrite(), loadFileWork, leafTasks, outerQueryLimit, 0);
 +    } else {
 +      for (ColumnStatsAutoGatherContext columnStatsAutoGatherContext : pCtx
 +          .getColumnStatsAutoGatherContexts()) {
 +        if (!columnStatsAutoGatherContext.isInsertInto()) {
 +          genColumnStatsTask(columnStatsAutoGatherContext.getAnalyzeRewrite(),
 +              columnStatsAutoGatherContext.getLoadFileWork(), leafTasks, outerQueryLimit, 0);
 +        } else {
 +          int numBitVector;
 +          try {
 +            numBitVector = HiveStatsUtils.getNumBitVectorsForNDVEstimation(conf);
 +          } catch (Exception e) {
 +            throw new SemanticException(e.getMessage());
 +          }
 +          genColumnStatsTask(columnStatsAutoGatherContext.getAnalyzeRewrite(),
 +              columnStatsAutoGatherContext.getLoadFileWork(), leafTasks, outerQueryLimit, numBitVector);
 +        }
 +      }
 +    }
 +  }
 +
 +  private Path getDefaultCtasLocation(final ParseContext pCtx) throws SemanticException {
 +    try {
 +      String protoName = null;
 +      if (pCtx.getQueryProperties().isCTAS()) {
 +        protoName = pCtx.getCreateTable().getTableName();
 +      } else if (pCtx.getQueryProperties().isMaterializedView()) {
 +        protoName = pCtx.getCreateViewDesc().getViewName();
 +      }
 +      String[] names = Utilities.getDbTableName(protoName);
 +      if (!db.databaseExists(names[0])) {
 +        throw new SemanticException("ERROR: The database " + names[0] + " does not exist.");
 +      }
 +      Warehouse wh = new Warehouse(conf);
-       return wh.getTablePath(db.getDatabase(names[0]), names[1]);
++      return wh.getDefaultTablePath(db.getDatabase(names[0]), names[1]);
 +    } catch (HiveException e) {
 +      throw new SemanticException(e);
 +    } catch (MetaException e) {
 +      throw new SemanticException(e);
 +    }
 +  }
 +
    private void patchUpAfterCTASorMaterializedView(final List<Task<? extends Serializable>>  rootTasks,
                                                    final HashSet<WriteEntity> outputs,
                                                    Task<? extends Serializable> createTask) {

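setLoadFileLocation and getDefaultCtasLocation above choose the move task's target as either the CTAS LOCATION clause or the warehouse default table path (now via getDefaultTablePath), and for MM tables additionally nest the load source under a write-id prefix from ValidWriteIds.getMmFilePrefix. A rough sketch of that decision using java.nio paths; the prefix format and helper below are illustrative, not Hive's:

    import java.nio.file.Path;
    import java.nio.file.Paths;

    public class CtasTargetSketch {
      // explicitLocation: value of the CTAS LOCATION clause, or null if absent
      // mmWriteId: non-null only for MM tables
      static Path ctasLoadSource(String explicitLocation, Path warehouseDefault, Long mmWriteId) {
        Path target = (explicitLocation == null) ? warehouseDefault : Paths.get(explicitLocation);
        if (mmWriteId != null) {
          // MM tables write under a per-write-id subdirectory of the table location
          target = target.resolve("mm_" + mmWriteId); // prefix format is illustrative only
        }
        return target;
      }

      public static void main(String[] args) {
        System.out.println(ctasLoadSource(null, Paths.get("/warehouse/db.db/t"), 5L));
        // prints (on Unix separators): /warehouse/db.db/t/mm_5
      }
    }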
http://git-wip-us.apache.org/repos/asf/hive/blob/1ceaf357/ql/src/java/org/apache/hadoop/hive/ql/parse/spark/SparkProcessAnalyzeTable.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/1ceaf357/ql/src/java/org/apache/hadoop/hive/ql/plan/CreateTableDesc.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/1ceaf357/ql/src/java/org/apache/hadoop/hive/ql/plan/PartitionDesc.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/1ceaf357/ql/src/java/org/apache/hadoop/hive/ql/plan/TableDesc.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/1ceaf357/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/1ceaf357/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHive.java
----------------------------------------------------------------------


[11/50] [abbrv] hive git commit: HIVE-16267 : Enable bootstrap function metadata to be loaded in repl load (Anishek Agarwal, reviewed by Sushanth Sowmyan)

Posted by we...@apache.org.
HIVE-16267 : Enable bootstrap function metadata to be loaded in repl load (Anishek Agarwal, reviewed by Sushanth Sowmyan)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/9e9356b5
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/9e9356b5
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/9e9356b5

Branch: refs/heads/hive-14535
Commit: 9e9356b5e2cd03ff327ac1b269983454118e5f8e
Parents: f56abb4
Author: Sushanth Sowmyan <kh...@gmail.com>
Authored: Thu May 4 01:37:59 2017 -0700
Committer: Sushanth Sowmyan <kh...@gmail.com>
Committed: Thu May 4 02:49:27 2017 -0700

----------------------------------------------------------------------
 .../hive/ql/TestReplicationScenarios.java       | 187 ++++++------
 .../apache/hadoop/hive/ql/metadata/Hive.java    |   1 -
 .../apache/hadoop/hive/ql/parse/EximUtil.java   | 157 ++--------
 .../hive/ql/parse/ImportSemanticAnalyzer.java   |   5 +-
 .../ql/parse/ReplicationSemanticAnalyzer.java   | 305 ++++++-------------
 .../hadoop/hive/ql/parse/repl/DumpType.java     |  45 +++
 .../dump/BootStrapReplicationSpecFunction.java  |  54 ++++
 .../hive/ql/parse/repl/dump/DBSerializer.java   |  54 ----
 .../ql/parse/repl/dump/FunctionSerializer.java  |  48 ---
 .../hive/ql/parse/repl/dump/HiveWrapper.java    |  73 +++++
 .../hive/ql/parse/repl/dump/JsonWriter.java     |  54 ----
 .../ql/parse/repl/dump/PartitionSerializer.java |  64 ----
 .../repl/dump/ReplicationSpecSerializer.java    |  36 ---
 .../ql/parse/repl/dump/TableSerializer.java     | 113 -------
 .../hadoop/hive/ql/parse/repl/dump/Utils.java   |  50 +++
 .../repl/dump/VersionCompatibleSerializer.java  |  37 ---
 .../ql/parse/repl/dump/io/DBSerializer.java     |  55 ++++
 .../parse/repl/dump/io/FunctionSerializer.java  |  49 +++
 .../hive/ql/parse/repl/dump/io/JsonWriter.java  |  55 ++++
 .../parse/repl/dump/io/PartitionSerializer.java |  65 ++++
 .../repl/dump/io/ReplicationSpecSerializer.java |  36 +++
 .../ql/parse/repl/dump/io/TableSerializer.java  | 114 +++++++
 .../dump/io/VersionCompatibleSerializer.java    |  37 +++
 .../parse/repl/events/AddPartitionHandler.java  |   6 +-
 .../repl/events/AlterPartitionHandler.java      |  28 +-
 .../ql/parse/repl/events/AlterTableHandler.java |  25 +-
 .../parse/repl/events/CreateTableHandler.java   |   6 +-
 .../ql/parse/repl/events/DefaultHandler.java    |   9 +-
 .../parse/repl/events/DropPartitionHandler.java |   9 +-
 .../ql/parse/repl/events/DropTableHandler.java  |   9 +-
 .../hive/ql/parse/repl/events/EventHandler.java |   6 +-
 .../ql/parse/repl/events/InsertHandler.java     |   9 +-
 .../hive/ql/parse/repl/load/DumpMetaData.java   | 143 +++++++++
 .../hive/ql/parse/repl/load/MetaData.java       |  64 ++++
 .../hive/ql/parse/repl/load/MetadataJson.java   | 128 ++++++++
 .../ql/parse/repl/dump/HiveWrapperTest.java     |  27 ++
 .../repl/events/TestEventHandlerFactory.java    |   4 +-
 37 files changed, 1273 insertions(+), 894 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/9e9356b5/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestReplicationScenarios.java
----------------------------------------------------------------------
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestReplicationScenarios.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestReplicationScenarios.java
index b3cbae0..5173d8b 100644
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestReplicationScenarios.java
+++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestReplicationScenarios.java
@@ -42,7 +42,9 @@ import org.junit.After;
 import org.junit.AfterClass;
 import org.junit.Before;
 import org.junit.BeforeClass;
+import org.junit.Rule;
 import org.junit.Test;
+import org.junit.rules.TestName;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -62,17 +64,22 @@ import static org.junit.Assert.assertNull;
 
 public class TestReplicationScenarios {
 
-  final static String DBNOTIF_LISTENER_CLASSNAME = "org.apache.hive.hcatalog.listener.DbNotificationListener";
+  @Rule
+  public final TestName testName = new TestName();
+
+  private final static String DBNOTIF_LISTENER_CLASSNAME =
+      "org.apache.hive.hcatalog.listener.DbNotificationListener";
       // FIXME : replace with hive copy once that is copied
-  final static String tid =
+  private final static String tid =
       TestReplicationScenarios.class.getCanonicalName().replace('.','_') + "_" + System.currentTimeMillis();
-  final static String TEST_PATH = System.getProperty("test.warehouse.dir","/tmp") + Path.SEPARATOR + tid;
+  private final static String TEST_PATH =
+      System.getProperty("test.warehouse.dir", "/tmp") + Path.SEPARATOR + tid;
 
-  static HiveConf hconf;
-  static boolean useExternalMS = false;
-  static int msPort;
-  static Driver driver;
-  static HiveMetaStoreClient metaStoreClient;
+  private static HiveConf hconf;
+  private static boolean useExternalMS = false;
+  private static int msPort;
+  private static Driver driver;
+  private static HiveMetaStoreClient metaStoreClient;
 
   protected static final Logger LOG = LoggerFactory.getLogger(TestReplicationScenarios.class);
   private ArrayList<String> lastResults;
@@ -141,6 +148,32 @@ public class TestReplicationScenarios {
     ReplicationSemanticAnalyzer.injectNextDumpDirForTest(String.valueOf(next));
   }
 
+  @Test
+  public void testFunctionReplicationAsPartOfBootstrap() throws IOException {
+    String dbName = createDB(testName.getMethodName());
+    run("CREATE FUNCTION " + dbName
+        + ".testFunction as 'com.yahoo.sketches.hive.theta.DataToSketchUDAF' "
+        + "using jar  'ivy://com.yahoo.datasketches:sketches-hive:0.8.2'");
+
+    String replicatedDbName = loadAndVerify(dbName);
+    run("SHOW FUNCTIONS LIKE '" + replicatedDbName + "*'");
+    verifyResults(new String[] { replicatedDbName + ".testFunction" });
+  }
+
+  private String loadAndVerify(String dbName) throws IOException {
+    advanceDumpDir();
+    run("REPL DUMP " + dbName);
+    String dumpLocation = getResult(0, 0);
+    String lastReplicationId = getResult(0, 1, true);
+    String replicatedDbName = dbName + "_replicated";
+    run("EXPLAIN REPL LOAD " + replicatedDbName + " FROM '" + dumpLocation + "'");
+    printOutput();
+    run("REPL LOAD " + replicatedDbName + " FROM '" + dumpLocation + "'");
+    verifyRun("REPL STATUS " + replicatedDbName, lastReplicationId);
+    return replicatedDbName;
+  }
+
+
   /**
    * Tests basic operation - creates a db, with 4 tables, 2 ptned and 2 unptned.
    * Inserts data into one of the ptned tables, and one of the unptned tables,
@@ -149,12 +182,8 @@ public class TestReplicationScenarios {
    */
   @Test
   public void testBasic() throws IOException {
-
-    String testName = "basic";
-    LOG.info("Testing "+testName);
-    String dbName = testName + "_" + tid;
-
-    run("CREATE DATABASE " + dbName);
+    String name = testName.getMethodName();
+    String dbName = createDB(name);
     run("CREATE TABLE " + dbName + ".unptned(a string) STORED AS TEXTFILE");
     run("CREATE TABLE " + dbName + ".ptned(a string) partitioned by (b int) STORED AS TEXTFILE");
     run("CREATE TABLE " + dbName + ".unptned_empty(a string) STORED AS TEXTFILE");
@@ -165,9 +194,9 @@ public class TestReplicationScenarios {
     String[] ptn_data_2 = new String[]{ "fifteen", "sixteen", "seventeen"};
     String[] empty = new String[]{};
 
-    String unptn_locn = new Path(TEST_PATH , testName + "_unptn").toUri().getPath();
-    String ptn_locn_1 = new Path(TEST_PATH , testName + "_ptn1").toUri().getPath();
-    String ptn_locn_2 = new Path(TEST_PATH , testName + "_ptn2").toUri().getPath();
+    String unptn_locn = new Path(TEST_PATH, name + "_unptn").toUri().getPath();
+    String ptn_locn_1 = new Path(TEST_PATH, name + "_ptn1").toUri().getPath();
+    String ptn_locn_2 = new Path(TEST_PATH, name + "_ptn2").toUri().getPath();
 
     createTestDataFile(unptn_locn, unptn_data);
     createTestDataFile(ptn_locn_1, ptn_data_1);
@@ -182,31 +211,19 @@ public class TestReplicationScenarios {
     verifySetup("SELECT a from " + dbName + ".ptned_empty", empty);
     verifySetup("SELECT * from " + dbName + ".unptned_empty", empty);
 
-    advanceDumpDir();
-    run("REPL DUMP " + dbName);
-    String replDumpLocn = getResult(0,0);
-    String replDumpId = getResult(0,1,true);
-    run("EXPLAIN REPL LOAD " + dbName + "_dupe FROM '" + replDumpLocn + "'");
-    printOutput();
-    run("REPL LOAD " + dbName + "_dupe FROM '" + replDumpLocn + "'");
+    String replicatedDbName = loadAndVerify(dbName);
 
-    verifyRun("REPL STATUS " + dbName + "_dupe", replDumpId);
-
-    verifyRun("SELECT * from " + dbName + "_dupe.unptned", unptn_data);
-    verifyRun("SELECT a from " + dbName + "_dupe.ptned WHERE b=1", ptn_data_1);
-    verifyRun("SELECT a from " + dbName + "_dupe.ptned WHERE b=2", ptn_data_2);
+    verifyRun("SELECT * from " + replicatedDbName + ".unptned", unptn_data);
+    verifyRun("SELECT a from " + replicatedDbName + ".ptned WHERE b=1", ptn_data_1);
+    verifyRun("SELECT a from " + replicatedDbName + ".ptned WHERE b=2", ptn_data_2);
     verifyRun("SELECT a from " + dbName + ".ptned_empty", empty);
     verifyRun("SELECT * from " + dbName + ".unptned_empty", empty);
   }
 
   @Test
   public void testBasicWithCM() throws Exception {
-
-    String testName = "basic_with_cm";
-    LOG.info("Testing "+testName);
-    String dbName = testName + "_" + tid;
-
-    run("CREATE DATABASE " + dbName);
+    String name = testName.getMethodName();
+    String dbName = createDB(name);
     run("CREATE TABLE " + dbName + ".unptned(a string) STORED AS TEXTFILE");
     run("CREATE TABLE " + dbName + ".ptned(a string) partitioned by (b int) STORED AS TEXTFILE");
     run("CREATE TABLE " + dbName + ".unptned_empty(a string) STORED AS TEXTFILE");
@@ -218,10 +235,10 @@ public class TestReplicationScenarios {
     String[] ptn_data_2_later = new String[]{ "eighteen", "nineteen", "twenty"};
     String[] empty = new String[]{};
 
-    String unptn_locn = new Path(TEST_PATH , testName + "_unptn").toUri().getPath();
-    String ptn_locn_1 = new Path(TEST_PATH , testName + "_ptn1").toUri().getPath();
-    String ptn_locn_2 = new Path(TEST_PATH , testName + "_ptn2").toUri().getPath();
-    String ptn_locn_2_later = new Path(TEST_PATH , testName + "_ptn2_later").toUri().getPath();
+    String unptn_locn = new Path(TEST_PATH, name + "_unptn").toUri().getPath();
+    String ptn_locn_1 = new Path(TEST_PATH, name + "_ptn1").toUri().getPath();
+    String ptn_locn_2 = new Path(TEST_PATH, name + "_ptn2").toUri().getPath();
+    String ptn_locn_2_later = new Path(TEST_PATH, name + "_ptn2_later").toUri().getPath();
 
     createTestDataFile(unptn_locn, unptn_data);
     createTestDataFile(ptn_locn_1, ptn_data_1);
@@ -334,11 +351,8 @@ public class TestReplicationScenarios {
 
   @Test
   public void testIncrementalAdds() throws IOException {
-    String testName = "incrementalAdds";
-    LOG.info("Testing "+testName);
-    String dbName = testName + "_" + tid;
-
-    run("CREATE DATABASE " + dbName);
+    String name = testName.getMethodName();
+    String dbName = createDB(name);
 
     run("CREATE TABLE " + dbName + ".unptned(a string) STORED AS TEXTFILE");
     run("CREATE TABLE " + dbName + ".ptned(a string) partitioned by (b int) STORED AS TEXTFILE");
@@ -357,9 +371,9 @@ public class TestReplicationScenarios {
     String[] ptn_data_2 = new String[]{ "fifteen", "sixteen", "seventeen"};
     String[] empty = new String[]{};
 
-    String unptn_locn = new Path(TEST_PATH , testName + "_unptn").toUri().getPath();
-    String ptn_locn_1 = new Path(TEST_PATH , testName + "_ptn1").toUri().getPath();
-    String ptn_locn_2 = new Path(TEST_PATH , testName + "_ptn2").toUri().getPath();
+    String unptn_locn = new Path(TEST_PATH, name + "_unptn").toUri().getPath();
+    String ptn_locn_1 = new Path(TEST_PATH, name + "_ptn1").toUri().getPath();
+    String ptn_locn_2 = new Path(TEST_PATH, name + "_ptn2").toUri().getPath();
 
     createTestDataFile(unptn_locn, unptn_data);
     createTestDataFile(ptn_locn_1, ptn_data_1);
@@ -421,11 +435,8 @@ public class TestReplicationScenarios {
   @Test
   public void testDrops() throws IOException {
 
-    String testName = "drops";
-    LOG.info("Testing "+testName);
-    String dbName = testName + "_" + tid;
-
-    run("CREATE DATABASE " + dbName);
+    String name = testName.getMethodName();
+    String dbName = createDB(name);
     run("CREATE TABLE " + dbName + ".unptned(a string) STORED AS TEXTFILE");
     run("CREATE TABLE " + dbName + ".ptned(a string) partitioned by (b string) STORED AS TEXTFILE");
     run("CREATE TABLE " + dbName + ".ptned2(a string) partitioned by (b string) STORED AS TEXTFILE");
@@ -436,9 +447,9 @@ public class TestReplicationScenarios {
     String[] ptn_data_2 = new String[]{ "fifteen", "sixteen", "seventeen"};
     String[] empty = new String[]{};
 
-    String unptn_locn = new Path(TEST_PATH , testName + "_unptn").toUri().getPath();
-    String ptn_locn_1 = new Path(TEST_PATH , testName + "_ptn1").toUri().getPath();
-    String ptn_locn_2 = new Path(TEST_PATH , testName + "_ptn2").toUri().getPath();
+    String unptn_locn = new Path(TEST_PATH, name + "_unptn").toUri().getPath();
+    String ptn_locn_1 = new Path(TEST_PATH, name + "_ptn1").toUri().getPath();
+    String ptn_locn_2 = new Path(TEST_PATH, name + "_ptn2").toUri().getPath();
 
     createTestDataFile(unptn_locn, unptn_data);
     createTestDataFile(ptn_locn_1, ptn_data_1);
@@ -535,10 +546,7 @@ public class TestReplicationScenarios {
   public void testDropsWithCM() throws IOException {
 
     String testName = "drops_with_cm";
-    LOG.info("Testing "+testName);
-    String dbName = testName + "_" + tid;
-
-    run("CREATE DATABASE " + dbName);
+    String dbName = createDB(testName);
     run("CREATE TABLE " + dbName + ".unptned(a string) STORED AS TEXTFILE");
     run("CREATE TABLE " + dbName + ".ptned(a string) partitioned by (b string) STORED AS TEXTFILE");
     run("CREATE TABLE " + dbName + ".ptned2(a string) partitioned by (b string) STORED AS TEXTFILE");
@@ -661,10 +669,7 @@ public class TestReplicationScenarios {
   public void testAlters() throws IOException {
 
     String testName = "alters";
-    LOG.info("Testing "+testName);
-    String dbName = testName + "_" + tid;
-
-    run("CREATE DATABASE " + dbName);
+    String dbName = createDB(testName);
     run("CREATE TABLE " + dbName + ".unptned(a string) STORED AS TEXTFILE");
     run("CREATE TABLE " + dbName + ".unptned2(a string) STORED AS TEXTFILE");
     run("CREATE TABLE " + dbName + ".ptned(a string) partitioned by (b string) STORED AS TEXTFILE");
@@ -846,10 +851,7 @@ public class TestReplicationScenarios {
   @Test
   public void testIncrementalLoad() throws IOException {
     String testName = "incrementalLoad";
-    LOG.info("Testing " + testName);
-    String dbName = testName + "_" + tid;
-
-    run("CREATE DATABASE " + dbName);
+    String dbName = createDB(testName);
 
     run("CREATE TABLE " + dbName + ".unptned(a string) STORED AS TEXTFILE");
     run("CREATE TABLE " + dbName + ".ptned(a string) partitioned by (b int) STORED AS TEXTFILE");
@@ -934,10 +936,7 @@ public class TestReplicationScenarios {
   @Test
   public void testIncrementalInserts() throws IOException {
     String testName = "incrementalInserts";
-    LOG.info("Testing " + testName);
-    String dbName = testName + "_" + tid;
-
-    run("CREATE DATABASE " + dbName);
+    String dbName = createDB(testName);
     run("CREATE TABLE " + dbName + ".unptned(a string) STORED AS TEXTFILE");
 
     advanceDumpDir();
@@ -1062,10 +1061,7 @@ public class TestReplicationScenarios {
   @Test
   public void testViewsReplication() throws IOException {
     String testName = "viewsReplication";
-    LOG.info("Testing "+testName);
-    String dbName = testName + "_" + tid;
-
-    run("CREATE DATABASE " + dbName);
+    String dbName = createDB(testName);
 
     run("CREATE TABLE " + dbName + ".unptned(a string) STORED AS TEXTFILE");
     run("CREATE TABLE " + dbName + ".ptned(a string) partitioned by (b int) STORED AS TEXTFILE");
@@ -1142,11 +1138,8 @@ public class TestReplicationScenarios {
 
   @Test
   public void testDumpLimit() throws IOException {
-    String testName = "dumpLimit";
-    LOG.info("Testing " + testName);
-    String dbName = testName + "_" + tid;
-
-    run("CREATE DATABASE " + dbName);
+    String name = testName.getMethodName();
+    String dbName = createDB(name);
     run("CREATE TABLE " + dbName + ".unptned(a string) STORED AS TEXTFILE");
 
     advanceDumpDir();
@@ -1530,11 +1523,8 @@ public class TestReplicationScenarios {
 
     // Now, to actually testing status - first, we bootstrap.
 
-    String testName = "incrementalStatus";
-    LOG.info("Testing " + testName);
-    String dbName = testName + "_" + tid;
-
-    run("CREATE DATABASE " + dbName);
+    String name = testName.getMethodName();
+    String dbName = createDB(name);
     advanceDumpDir();
     run("REPL DUMP " + dbName);
     String lastReplDumpLocn = getResult(0, 0);
@@ -1589,6 +1579,13 @@ public class TestReplicationScenarios {
 
   }
 
+  private static String createDB(String name) {
+    LOG.info("Testing " + name);
+    String dbName = name + "_" + tid;
+    run("CREATE DATABASE " + dbName);
+    return dbName;
+  }
+
   @Test
   public void testEventFilters(){
     // Test testing that the filters introduced by EventUtils are working correctly.
@@ -1749,18 +1746,25 @@ public class TestReplicationScenarios {
     return (lastResults.get(rowNum).split("\\t"))[colNum];
   }
 
+  /**
+   * Results read from the Hive output do not preserve case and come back in lower case,
+   * so we check against lower-case data values only.
+   * The exception is NULL values, which are returned in upper case, hence both sides are
+   * explicitly lower-cased before the assert.
+   */
   private void verifyResults(String[] data) throws IOException {
     List<String> results = getOutput();
-    LOG.info("Expecting {}",data);
-    LOG.info("Got {}",results);
-    assertEquals(data.length,results.size());
-    for (int i = 0; i < data.length; i++){
-      assertEquals(data[i],results.get(i));
+    LOG.info("Expecting {}", data);
+    LOG.info("Got {}", results);
+    assertEquals(data.length, results.size());
+    for (int i = 0; i < data.length; i++) {
+      assertEquals(data[i].toLowerCase(), results.get(i).toLowerCase());
     }
   }
 
   private List<String> getOutput() throws IOException {
-    List<String> results = new ArrayList<String>();
+    List<String> results = new ArrayList<>();
     try {
       driver.getResults(results);
     } catch (CommandNeedRetryException e) {
@@ -1848,5 +1852,4 @@ public class TestReplicationScenarios {
       }
     }
   }
-
 }

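Much of this test diff swaps hard-coded test names for JUnit 4's TestName rule, so the database name is derived from the running test method. A minimal example of the pattern outside Hive (the timestamp suffix mirrors the tid suffix used above):

    import org.junit.Rule;
    import org.junit.Test;
    import org.junit.rules.TestName;

    public class TestNameRuleExample {
      @Rule
      public final TestName testName = new TestName();

      @Test
      public void testBasic() {
        // yields "testBasic_<timestamp>" without repeating the method name as a string literal
        String dbName = testName.getMethodName() + "_" + System.currentTimeMillis();
        System.out.println(dbName);
      }
    }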
http://git-wip-us.apache.org/repos/asf/hive/blob/9e9356b5/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
index 5b49dfd..5b908e8 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
@@ -1437,7 +1437,6 @@ public class Hive {
    */
   public List<String> getTablesByType(String dbName, String pattern, TableType type)
       throws HiveException {
-    List<String> retList = new ArrayList<String>();
     if (dbName == null)
       dbName = SessionState.get().getCurrentDatabase();
 

http://git-wip-us.apache.org/repos/asf/hive/blob/9e9356b5/ql/src/java/org/apache/hadoop/hive/ql/parse/EximUtil.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/EximUtil.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/EximUtil.java
index 1ea5182..a9384be 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/EximUtil.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/EximUtil.java
@@ -18,43 +18,36 @@
 
 package org.apache.hadoop.hive.ql.parse;
 
-import com.google.common.base.Function;
 import org.apache.commons.lang.StringUtils;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.PathFilter;
+import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.metastore.api.Database;
+import org.apache.hadoop.hive.metastore.api.FieldSchema;
 import org.apache.hadoop.hive.ql.Context;
+import org.apache.hadoop.hive.ql.ErrorMsg;
 import org.apache.hadoop.hive.ql.exec.Task;
 import org.apache.hadoop.hive.ql.hooks.ReadEntity;
 import org.apache.hadoop.hive.ql.hooks.WriteEntity;
 import org.apache.hadoop.hive.ql.metadata.Hive;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import org.apache.hadoop.fs.FSDataInputStream;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.metastore.api.FieldSchema;
-import org.apache.hadoop.hive.metastore.api.Partition;
-import org.apache.hadoop.hive.metastore.api.Table;
-import org.apache.hadoop.hive.ql.ErrorMsg;
-import org.apache.hadoop.hive.ql.parse.repl.dump.DBSerializer;
-import org.apache.hadoop.hive.ql.parse.repl.dump.JsonWriter;
-import org.apache.hadoop.hive.ql.parse.repl.dump.ReplicationSpecSerializer;
-import org.apache.hadoop.hive.ql.parse.repl.dump.TableSerializer;
-import org.apache.thrift.TDeserializer;
+import org.apache.hadoop.hive.ql.parse.repl.dump.io.DBSerializer;
+import org.apache.hadoop.hive.ql.parse.repl.dump.io.JsonWriter;
+import org.apache.hadoop.hive.ql.parse.repl.dump.io.ReplicationSpecSerializer;
+import org.apache.hadoop.hive.ql.parse.repl.dump.io.TableSerializer;
+import org.apache.hadoop.hive.ql.parse.repl.load.MetaData;
+import org.apache.hadoop.hive.ql.parse.repl.load.MetadataJson;
 import org.apache.thrift.TException;
-import org.apache.thrift.protocol.TJSONProtocol;
-import org.json.JSONArray;
 import org.json.JSONException;
-import org.json.JSONObject;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
-import javax.annotation.Nullable;
 import java.io.ByteArrayOutputStream;
 import java.io.IOException;
 import java.io.Serializable;
 import java.net.URI;
 import java.net.URISyntaxException;
-import java.util.ArrayList;
 import java.util.Collection;
 import java.util.HashSet;
 import java.util.Iterator;
@@ -270,124 +263,30 @@ public class EximUtil {
     }
   }
 
-  /**
-   * Utility class to help return complex value from readMetaData function
-   */
-  public static class ReadMetaData {
-    private final Database db;
-    private final Table table;
-    private final Iterable<Partition> partitions;
-    private final ReplicationSpec replicationSpec;
-
-    public ReadMetaData(){
-      this(null,null,null,new ReplicationSpec());
-    }
-    public ReadMetaData(Database db, Table table, Iterable<Partition> partitions, ReplicationSpec replicationSpec){
-      this.db = db;
-      this.table = table;
-      this.partitions = partitions;
-      this.replicationSpec = replicationSpec;
-    }
-
-    public Database getDatabase(){
-      return db;
-    }
-
-    public Table getTable() {
-      return table;
-    }
-
-    public Iterable<Partition> getPartitions() {
-      return partitions;
-    }
-
-    public ReplicationSpec getReplicationSpec() {
-      return replicationSpec;
-    }
-  };
-
-  public static ReadMetaData readMetaData(FileSystem fs, Path metadataPath)
+  static MetaData readMetaData(FileSystem fs, Path metadataPath)
       throws IOException, SemanticException {
-    FSDataInputStream mdstream = null;
+    String message = readAsString(fs, metadataPath);
     try {
-      mdstream = fs.open(metadataPath);
+      return new MetadataJson(message).getMetaData();
+    } catch (TException | JSONException e) {
+      throw new SemanticException(ErrorMsg.ERROR_SERIALIZE_METADATA.getMsg(), e);
+    }
+  }
+
+  private static String readAsString(final FileSystem fs, final Path fromMetadataPath)
+      throws IOException {
+    try (FSDataInputStream stream = fs.open(fromMetadataPath)) {
       byte[] buffer = new byte[1024];
       ByteArrayOutputStream sb = new ByteArrayOutputStream();
-      int read = mdstream.read(buffer);
+      int read = stream.read(buffer);
       while (read != -1) {
         sb.write(buffer, 0, read);
-        read = mdstream.read(buffer);
-      }
-      String md = new String(sb.toByteArray(), "UTF-8");
-      JSONObject jsonContainer = new JSONObject(md);
-      String version = jsonContainer.getString("version");
-      String fcversion = getJSONStringEntry(jsonContainer, "fcversion");
-      checkCompatibility(version, fcversion);
-
-      String dbDesc = getJSONStringEntry(jsonContainer, "db");
-      String tableDesc = getJSONStringEntry(jsonContainer,"table");
-      TDeserializer deserializer = new TDeserializer(new TJSONProtocol.Factory());
-
-      Database db = null;
-      if (dbDesc != null){
-        db = new Database();
-        deserializer.deserialize(db, dbDesc, "UTF-8");
-      }
-
-      Table table = null;
-      List<Partition> partitionsList = null;
-      if (tableDesc != null){
-        table = new Table();
-        deserializer.deserialize(table, tableDesc, "UTF-8");
-        // TODO : jackson-streaming-iterable-redo this
-        JSONArray jsonPartitions = new JSONArray(jsonContainer.getString("partitions"));
-        partitionsList = new ArrayList<Partition>(jsonPartitions.length());
-        for (int i = 0; i < jsonPartitions.length(); ++i) {
-          String partDesc = jsonPartitions.getString(i);
-          Partition partition = new Partition();
-          deserializer.deserialize(partition, partDesc, "UTF-8");
-          partitionsList.add(partition);
-        }
-      }
-
-      return new ReadMetaData(db, table, partitionsList,readReplicationSpec(jsonContainer));
-    } catch (JSONException e) {
-      throw new SemanticException(ErrorMsg.ERROR_SERIALIZE_METADATA.getMsg(), e);
-    } catch (TException e) {
-      throw new SemanticException(ErrorMsg.ERROR_SERIALIZE_METADATA.getMsg(), e);
-    } finally {
-      if (mdstream != null) {
-        mdstream.close();
+        read = stream.read(buffer);
       }
+      return new String(sb.toByteArray(), "UTF-8");
     }
   }
 
-  private static ReplicationSpec readReplicationSpec(final JSONObject jsonContainer){
-    Function<String,String> keyFetcher = new Function<String, String>() {
-      @Override
-      public String apply(@Nullable String s) {
-        return getJSONStringEntry(jsonContainer,s);
-      }
-    };
-    return new ReplicationSpec(keyFetcher);
-  }
-
-  private static String getJSONStringEntry(JSONObject jsonContainer, String name) {
-    String retval = null;
-    try {
-      retval = jsonContainer.getString(name);
-    } catch (JSONException ignored) {}
-    return retval;
-  }
-
-  /* check the forward and backward compatibility */
-  private static void checkCompatibility(String version, String fcVersion) throws SemanticException {
-    doCheckCompatibility(
-        METADATA_FORMAT_VERSION,
-        version,
-        fcVersion);
-  }
-
   /* check the forward and backward compatibility */
   public static void doCheckCompatibility(String currVersion,
       String version, String fcVersion) throws SemanticException {

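The EximUtil rewrite above replaces a manually closed FSDataInputStream with try-with-resources and pushes the JSON handling into the new MetadataJson class. A generic sketch of the read-small-file-into-a-UTF-8-string idiom it now uses, written against java.io rather than the Hadoop FileSystem API:

    import java.io.ByteArrayOutputStream;
    import java.io.IOException;
    import java.io.InputStream;
    import java.nio.charset.StandardCharsets;

    final class ReadAsString {
      static String readAsString(InputStream in) throws IOException {
        try (InputStream stream = in) {            // closed even if an exception is thrown
          ByteArrayOutputStream sb = new ByteArrayOutputStream();
          byte[] buffer = new byte[1024];
          int read;
          while ((read = stream.read(buffer)) != -1) {
            sb.write(buffer, 0, read);
          }
          return new String(sb.toByteArray(), StandardCharsets.UTF_8);
        }
      }
    }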
http://git-wip-us.apache.org/repos/asf/hive/blob/9e9356b5/ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java
index 71d6074..dc86942 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java
@@ -25,7 +25,6 @@ import java.net.URISyntaxException;
 import java.util.ArrayList;
 import java.util.Collections;
 import java.util.Comparator;
-import java.util.HashMap;
 import java.util.Iterator;
 import java.util.LinkedHashMap;
 import java.util.List;
@@ -38,7 +37,6 @@ import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hive.common.FileUtils;
-import org.apache.hadoop.hive.common.StatsSetupConst;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.metastore.MetaStoreUtils;
 import org.apache.hadoop.hive.metastore.TableType;
@@ -60,6 +58,7 @@ import org.apache.hadoop.hive.ql.metadata.Hive;
 import org.apache.hadoop.hive.ql.metadata.HiveException;
 import org.apache.hadoop.hive.ql.metadata.InvalidTableException;
 import org.apache.hadoop.hive.ql.metadata.Table;
+import org.apache.hadoop.hive.ql.parse.repl.load.MetaData;
 import org.apache.hadoop.hive.ql.plan.AddPartitionDesc;
 import org.apache.hadoop.hive.ql.plan.ImportTableDesc;
 import org.apache.hadoop.hive.ql.plan.DDLWork;
@@ -186,7 +185,7 @@ public class ImportSemanticAnalyzer extends BaseSemanticAnalyzer {
     FileSystem fs = FileSystem.get(fromURI, x.getConf());
     x.getInputs().add(toReadEntity(fromPath, x.getConf()));
 
-    EximUtil.ReadMetaData rv = new EximUtil.ReadMetaData();
+    MetaData rv = new MetaData();
     try {
       rv =  EximUtil.readMetaData(fs, new Path(fromPath, EximUtil.METADATA_NAME));
     } catch (IOException e) {

http://git-wip-us.apache.org/repos/asf/hive/blob/9e9356b5/ql/src/java/org/apache/hadoop/hive/ql/parse/ReplicationSemanticAnalyzer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/ReplicationSemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/ReplicationSemanticAnalyzer.java
index 37aa3ba..2daa123 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/ReplicationSemanticAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/ReplicationSemanticAnalyzer.java
@@ -17,77 +17,70 @@
  */
 package org.apache.hadoop.hive.ql.parse;
 
-import com.google.common.base.Function;
-import com.google.common.collect.Iterables;
+import com.google.common.base.Predicate;
+import com.google.common.collect.Collections2;
 import com.google.common.primitives.Ints;
 import org.antlr.runtime.tree.Tree;
-import org.apache.commons.collections.CollectionUtils;
-import org.apache.commons.collections.Predicate;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.metastore.IMetaStoreClient;
-import org.apache.hadoop.hive.metastore.ReplChangeManager;
 import org.apache.hadoop.hive.metastore.api.Database;
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
+import org.apache.hadoop.hive.metastore.api.Function;
 import org.apache.hadoop.hive.metastore.api.InvalidOperationException;
 import org.apache.hadoop.hive.metastore.api.NotificationEvent;
 import org.apache.hadoop.hive.metastore.messaging.event.filters.AndFilter;
 import org.apache.hadoop.hive.metastore.messaging.event.filters.DatabaseAndTableFilter;
 import org.apache.hadoop.hive.metastore.messaging.event.filters.EventBoundaryFilter;
 import org.apache.hadoop.hive.metastore.messaging.event.filters.MessageFormatFilter;
-import org.apache.hadoop.hive.metastore.messaging.AddPartitionMessage;
 import org.apache.hadoop.hive.metastore.messaging.AlterPartitionMessage;
 import org.apache.hadoop.hive.metastore.messaging.AlterTableMessage;
-import org.apache.hadoop.hive.metastore.messaging.CreateTableMessage;
 import org.apache.hadoop.hive.metastore.messaging.DropPartitionMessage;
 import org.apache.hadoop.hive.metastore.messaging.DropTableMessage;
 import org.apache.hadoop.hive.metastore.messaging.EventUtils;
 import org.apache.hadoop.hive.metastore.messaging.InsertMessage;
 import org.apache.hadoop.hive.metastore.messaging.MessageDeserializer;
 import org.apache.hadoop.hive.metastore.messaging.MessageFactory;
-import org.apache.hadoop.hive.metastore.messaging.PartitionFiles;
 import org.apache.hadoop.hive.ql.ErrorMsg;
 import org.apache.hadoop.hive.ql.QueryState;
 import org.apache.hadoop.hive.ql.exec.Task;
 import org.apache.hadoop.hive.ql.exec.TaskFactory;
-import org.apache.hadoop.hive.ql.exec.Utilities;
 import org.apache.hadoop.hive.ql.hooks.ReadEntity;
 import org.apache.hadoop.hive.ql.metadata.Hive;
 import org.apache.hadoop.hive.ql.metadata.HiveException;
-import org.apache.hadoop.hive.ql.metadata.Partition;
 import org.apache.hadoop.hive.ql.metadata.Table;
-import org.apache.hadoop.hive.ql.parse.repl.dump.FunctionSerializer;
-import org.apache.hadoop.hive.ql.parse.repl.dump.JsonWriter;
+import org.apache.hadoop.hive.ql.parse.repl.DumpType;
+import org.apache.hadoop.hive.ql.parse.repl.dump.HiveWrapper;
+import org.apache.hadoop.hive.ql.parse.repl.load.DumpMetaData;
+import org.apache.hadoop.hive.ql.parse.repl.dump.io.FunctionSerializer;
+import org.apache.hadoop.hive.ql.parse.repl.dump.io.JsonWriter;
+import org.apache.hadoop.hive.ql.parse.repl.dump.Utils;
 import org.apache.hadoop.hive.ql.parse.repl.events.EventHandler;
 import org.apache.hadoop.hive.ql.parse.repl.events.EventHandlerFactory;
+import org.apache.hadoop.hive.ql.parse.repl.load.MetaData;
 import org.apache.hadoop.hive.ql.plan.AlterDatabaseDesc;
 import org.apache.hadoop.hive.ql.plan.AlterTableDesc;
 import org.apache.hadoop.hive.ql.plan.CreateDatabaseDesc;
+import org.apache.hadoop.hive.ql.plan.CreateFunctionDesc;
 import org.apache.hadoop.hive.ql.plan.DDLWork;
 import org.apache.hadoop.hive.ql.plan.DependencyCollectionWork;
 import org.apache.hadoop.hive.ql.plan.DropTableDesc;
 import org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc;
 import org.apache.hadoop.hive.ql.plan.ExprNodeConstantDesc;
 import org.apache.hadoop.hive.ql.plan.ExprNodeGenericFuncDesc;
+import org.apache.hadoop.hive.ql.plan.FunctionWork;
 import org.apache.hadoop.hive.ql.plan.PlanUtils;
 import org.apache.hadoop.hive.ql.plan.RenamePartitionDesc;
 import org.apache.hadoop.hive.ql.plan.TruncateTableDesc;
 import org.apache.hadoop.hive.serde2.typeinfo.PrimitiveTypeInfo;
 import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;
-import org.apache.hadoop.io.IOUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import javax.annotation.Nullable;
-import java.io.BufferedReader;
-import java.io.BufferedWriter;
-import java.io.DataOutputStream;
 import java.io.FileNotFoundException;
 import java.io.IOException;
-import java.io.InputStreamReader;
-import java.io.OutputStreamWriter;
 import java.io.Serializable;
 import java.net.URI;
 import java.util.ArrayList;
@@ -120,154 +113,8 @@ public class ReplicationSemanticAnalyzer extends BaseSemanticAnalyzer {
   private static String testInjectDumpDir = null; // unit tests can overwrite this to affect default dump behaviour
   private static final String dumpSchema = "dump_dir,last_repl_id#string,string";
 
-  public static final String DUMPMETADATA = "_dumpmetadata";
-
-  public enum DUMPTYPE {
-    BOOTSTRAP("BOOTSTRAP"),
-    INCREMENTAL("INCREMENTAL"),
-    EVENT_CREATE_TABLE("EVENT_CREATE_TABLE"),
-    EVENT_ADD_PARTITION("EVENT_ADD_PARTITION"),
-    EVENT_DROP_TABLE("EVENT_DROP_TABLE"),
-    EVENT_DROP_PARTITION("EVENT_DROP_PARTITION"),
-    EVENT_ALTER_TABLE("EVENT_ALTER_TABLE"),
-    EVENT_RENAME_TABLE("EVENT_RENAME_TABLE"),
-    EVENT_TRUNCATE_TABLE("EVENT_TRUNCATE_TABLE"),
-    EVENT_ALTER_PARTITION("EVENT_ALTER_PARTITION"),
-    EVENT_RENAME_PARTITION("EVENT_RENAME_PARTITION"),
-    EVENT_TRUNCATE_PARTITION("EVENT_TRUNCATE_PARTITION"),
-    EVENT_INSERT("EVENT_INSERT"),
-    EVENT_UNKNOWN("EVENT_UNKNOWN");
-
-    String type = null;
-    DUMPTYPE(String type) {
-      this.type = type;
-    }
-
-    @Override
-    public String toString(){
-      return type;
-    }
-
-  };
-
-  public static class DumpMetaData {
-    // wrapper class for reading and writing metadata about a dump
-    // responsible for _dumpmetadata files
-
-    private DUMPTYPE dumpType;
-    private Long eventFrom = null;
-    private Long eventTo = null;
-    private String payload = null;
-    private boolean initialized = false;
-
-    private final Path dumpRoot;
-    private final Path dumpFile;
-    private final HiveConf hiveConf;
-    private Path cmRoot;
-
-    public DumpMetaData(Path dumpRoot, HiveConf hiveConf) {
-      this.dumpRoot = dumpRoot;
-      this.hiveConf = hiveConf;
-      dumpFile = new Path(dumpRoot, DUMPMETADATA);
-    }
-
-    public DumpMetaData(Path dumpRoot, DUMPTYPE lvl, Long eventFrom, Long eventTo, Path cmRoot,
-        HiveConf hiveConf){
-      this(dumpRoot,hiveConf);
-      setDump(lvl, eventFrom, eventTo, cmRoot);
-    }
-
-    public void setDump(DUMPTYPE lvl, Long eventFrom, Long eventTo, Path cmRoot){
-      this.dumpType = lvl;
-      this.eventFrom = eventFrom;
-      this.eventTo = eventTo;
-      this.initialized = true;
-      this.cmRoot = cmRoot;
-    }
-
-    public void loadDumpFromFile() throws SemanticException {
-      try {
-        // read from dumpfile and instantiate self
-        FileSystem fs = dumpFile.getFileSystem(hiveConf);
-        BufferedReader br = new BufferedReader(new InputStreamReader(fs.open(dumpFile)));
-        String line = null;
-        if ( (line = br.readLine()) != null){
-          String[] lineContents = line.split("\t", 5);
-          setDump(DUMPTYPE.valueOf(lineContents[0]), Long.valueOf(lineContents[1]), Long.valueOf(lineContents[2]),
-              new Path(lineContents[3]));
-          setPayload(lineContents[4].equals(Utilities.nullStringOutput) ? null : lineContents[4]);
-          ReplChangeManager.setCmRoot(cmRoot);
-        } else {
-          throw new IOException("Unable to read valid values from dumpFile:"+dumpFile.toUri().toString());
-        }
-      } catch (IOException ioe){
-        throw new SemanticException(ioe);
-      }
-    }
-
-    public DUMPTYPE getDumpType() throws SemanticException {
-      initializeIfNot();
-      return this.dumpType;
-    }
-
-    public String getPayload() throws SemanticException {
-      initializeIfNot();
-      return this.payload;
-    }
-
-    public void setPayload(String payload) {
-      this.payload = payload;
-    }
-
-    public Long getEventFrom() throws SemanticException {
-      initializeIfNot();
-      return eventFrom;
-    }
-
-    public Long getEventTo() throws SemanticException {
-      initializeIfNot();
-      return eventTo;
-    }
-
-    public Path getCmRoot() {
-      return cmRoot;
-    }
-
-    public void setCmRoot(Path cmRoot) {
-      this.cmRoot = cmRoot;
-    }
-
-    public Path getDumpFilePath() {
-      return dumpFile;
-    }
-
-    public boolean isIncrementalDump() throws SemanticException {
-      initializeIfNot();
-      return (this.dumpType == DUMPTYPE.INCREMENTAL);
-    }
-
-    private void initializeIfNot() throws SemanticException {
-      if (!initialized){
-        loadDumpFromFile();
-      }
-    }
-
-    public void write() throws SemanticException {
-      writeOutput(
-          Arrays.asList(
-              dumpType.toString(),
-              eventFrom.toString(),
-              eventTo.toString(),
-              cmRoot.toString(),
-              payload),
-          dumpFile,
-          hiveConf
-      );
-    }
 
-  }
-
-  public ReplicationSemanticAnalyzer(QueryState queryState) throws SemanticException {
+  ReplicationSemanticAnalyzer(QueryState queryState) throws SemanticException {
     super(queryState);
   }
 
@@ -387,7 +234,7 @@ public class ReplicationSemanticAnalyzer extends BaseSemanticAnalyzer {
         LOG.info(
             "Consolidation done, preparing to return {},{}->{}",
             dumpRoot.toUri(), bootDumpBeginReplId, bootDumpEndReplId);
-        dmd.setDump(DUMPTYPE.BOOTSTRAP, bootDumpBeginReplId, bootDumpEndReplId, cmRoot);
+        dmd.setDump(DumpType.BOOTSTRAP, bootDumpBeginReplId, bootDumpEndReplId, cmRoot);
         dmd.write();
 
         // Set the correct last repl id to return to the user
@@ -433,10 +280,14 @@ public class ReplicationSemanticAnalyzer extends BaseSemanticAnalyzer {
         }
 
         LOG.info("Done dumping events, preparing to return {},{}", dumpRoot.toUri(), lastReplId);
-        writeOutput(
-            Arrays.asList("incremental", String.valueOf(eventFrom), String.valueOf(lastReplId)),
+        Utils.writeOutput(
+            Arrays.asList(
+                "incremental",
+                String.valueOf(eventFrom),
+                String.valueOf(lastReplId)
+            ),
             dmd.getDumpFilePath(), conf);
-        dmd.setDump(DUMPTYPE.INCREMENTAL, eventFrom, lastReplId, cmRoot);
+        dmd.setDump(DumpType.INCREMENTAL, eventFrom, lastReplId, cmRoot);
         dmd.write();
       }
       prepareReturnValues(Arrays.asList(dumpRoot.toUri().toString(), String.valueOf(lastReplId)), dumpSchema);
@@ -463,7 +314,7 @@ public class ReplicationSemanticAnalyzer extends BaseSemanticAnalyzer {
     testInjectDumpDir = dumpdir;
   }
 
-  String getNextDumpDir() {
+  private String getNextDumpDir() {
     if (conf.getBoolVar(HiveConf.ConfVars.HIVE_IN_TEST)) {
       // make it easy to write .q unit tests, instead of unique id generation.
       // however, this does mean that in writing tests, we have to be aware that
@@ -494,8 +345,8 @@ public class ReplicationSemanticAnalyzer extends BaseSemanticAnalyzer {
       // TODO : instantiating FS objects are generally costly. Refactor
       FileSystem fs = dbRoot.getFileSystem(conf);
       Path dumpPath = new Path(dbRoot, EximUtil.METADATA_NAME);
-      Database dbObj = db.getDatabase(dbName);
-      EximUtil.createDbExportDump(fs, dumpPath, dbObj, getNewReplicationSpec());
+      HiveWrapper.Tuple<Database> database = new HiveWrapper(db, dbName).database();
+      EximUtil.createDbExportDump(fs, dumpPath, database.object, database.replicationSpec);
     } catch (Exception e) {
       // TODO : simple wrap & rethrow for now, clean up with error codes
       throw new SemanticException(e);
@@ -513,9 +364,16 @@ public class ReplicationSemanticAnalyzer extends BaseSemanticAnalyzer {
       // TODO : This should ideally return the Function Objects and not Strings(function names) that should be done by the caller, Look at this separately.
       List<String> functionNames = db.getFunctions(dbName, "*");
       for (String functionName : functionNames) {
-        org.apache.hadoop.hive.metastore.api.Function function =
-            db.getFunction(dbName, functionName);
-        if (function.getResourceUris().isEmpty()) {
+        HiveWrapper.Tuple<Function> tuple;
+        try {
+          tuple = new HiveWrapper(db, dbName).function(functionName);
+        } catch (HiveException e) {
+          // This can happen because we list function names via getFunctions before fetching each one;
+          // a user may drop the function in between, in which case this lookup fails.
+          LOG.info("Function " + functionName + " could not be found, we are ignoring it as it can be a valid state ", e);
+          continue;
+        }
+        if (tuple.object.getResourceUris().isEmpty()) {
           SESSION_STATE_LOG.warn(
               "Not replicating function: " + functionName + " as it seems to have been created "
                   + "without USING clause");
@@ -526,7 +384,7 @@ public class ReplicationSemanticAnalyzer extends BaseSemanticAnalyzer {
             new Path(new Path(functionsRoot, functionName), FUNCTION_METADATA_DIR_NAME);
         try (JsonWriter jsonWriter = new JsonWriter(functionMetadataRoot.getFileSystem(conf),
             functionMetadataRoot)) {
-          new FunctionSerializer(function).writeTo(jsonWriter, getNewReplicationSpec());
+          new FunctionSerializer(tuple.object).writeTo(jsonWriter, tuple.replicationSpec);
         }
       }
     } catch (Exception e) {
@@ -738,7 +596,7 @@ public class ReplicationSemanticAnalyzer extends BaseSemanticAnalyzer {
                 taskChainTail.getClass(), taskChainTail.getId(), barrierTask.getClass(), barrierTask.getId());
             taskChainTail = barrierTask;
             evstage++;
-            lastEvid = dmd.eventTo;
+            lastEvid = dmd.getEventTo();
           }
         }
 
@@ -1121,7 +979,7 @@ public class ReplicationSemanticAnalyzer extends BaseSemanticAnalyzer {
       // associated with that
       // Then, we iterate over all subdirs, and create table imports for each.
 
-      EximUtil.ReadMetaData rv = new EximUtil.ReadMetaData();
+      MetaData rv = new MetaData();
       try {
         rv = EximUtil.readMetaData(fs, new Path(dir.getPath(), EximUtil.METADATA_NAME));
       } catch (IOException e) {
@@ -1163,15 +1021,67 @@ public class ReplicationSemanticAnalyzer extends BaseSemanticAnalyzer {
       rootTasks.add(dbRootTask);
       FileStatus[] dirsInDbPath = fs.listStatus(dir.getPath(), EximUtil.getDirectoryFilter(fs));
 
-      for (FileStatus tableDir : dirsInDbPath) {
+      for (FileStatus tableDir : Collections2.filter(Arrays.asList(dirsInDbPath), new TableDirPredicate())) {
         analyzeTableLoad(
             dbName, null, tableDir.getPath().toUri().toString(), dbRootTask, null, null);
       }
+
+      //Function load
+      Path functionMetaDataRoot = new Path(dir.getPath(), FUNCTIONS_ROOT_DIR_NAME);
+      if (fs.exists(functionMetaDataRoot)) {
+        List<FileStatus> functionDirectories =
+            Arrays.asList(fs.listStatus(functionMetaDataRoot, EximUtil.getDirectoryFilter(fs)));
+        for (FileStatus functionDir : functionDirectories) {
+          analyzeFunctionLoad(dbName, functionDir, dbRootTask);
+        }
+      }
     } catch (Exception e) {
       throw new SemanticException(e);
     }
   }
 
+  private static class TableDirPredicate implements Predicate<FileStatus> {
+    @Override
+    public boolean apply(FileStatus fileStatus) {
+      return !fileStatus.getPath().getName().contains(FUNCTIONS_ROOT_DIR_NAME);
+    }
+  }
+
+  private void analyzeFunctionLoad(String dbName, FileStatus functionDir,
+      Task<? extends Serializable> createDbTask) throws IOException, SemanticException {
+    URI fromURI = EximUtil
+        .getValidatedURI(conf, stripQuotes(functionDir.getPath().toUri().toString()));
+    Path fromPath = new Path(fromURI.getScheme(), fromURI.getAuthority(), fromURI.getPath());
+
+    FileSystem fs = FileSystem.get(fromURI, conf);
+    inputs.add(toReadEntity(fromPath, conf));
+
+    try {
+      MetaData metaData = EximUtil.readMetaData(fs, new Path(fromPath, EximUtil.METADATA_NAME));
+      ReplicationSpec replicationSpec = metaData.getReplicationSpec();
+      if (replicationSpec.isNoop()) {
+        // nothing to do here, silently return.
+        return;
+      }
+      CreateFunctionDesc desc = new CreateFunctionDesc(
+          dbName + "." + metaData.function.getFunctionName(),
+          false,
+          metaData.function.getClassName(),
+          metaData.function.getResourceUris()
+      );
+
+      Task<FunctionWork> currentTask = TaskFactory.get(new FunctionWork(desc), conf);
+      if (createDbTask != null) {
+        createDbTask.addDependentTask(currentTask);
+        LOG.debug("Added {}:{} as a precursor of {}:{}",
+            createDbTask.getClass(), createDbTask.getId(), currentTask.getClass(),
+            currentTask.getId());
+      }
+    } catch (IOException e) {
+      throw new SemanticException(ErrorMsg.INVALID_PATH.getMsg(), e);
+    }
+  }
+
   private List<Task<? extends Serializable>> analyzeTableLoad(
       String dbName, String tblName, String locn,
       Task<? extends Serializable> precursor,
@@ -1270,27 +1180,7 @@ public class ReplicationSemanticAnalyzer extends BaseSemanticAnalyzer {
       LOG.debug("    > " + s);
     }
     ctx.setResFile(ctx.getLocalTmpPath());
-    writeOutput(values, ctx.getResFile(), conf);
-  }
-
-  private static void writeOutput(List<String> values, Path outputFile, HiveConf hiveConf)
-      throws SemanticException {
-    FileSystem fs = null;
-    DataOutputStream outStream = null;
-    try {
-      fs = outputFile.getFileSystem(hiveConf);
-      outStream = fs.create(outputFile);
-      outStream.writeBytes((values.get(0) == null ? Utilities.nullStringOutput : values.get(0)));
-      for (int i = 1; i < values.size(); i++) {
-        outStream.write(Utilities.tabCode);
-        outStream.writeBytes((values.get(i) == null ? Utilities.nullStringOutput : values.get(i)));
-      }
-      outStream.write(Utilities.newLineCode);
-    } catch (IOException e) {
-      throw new SemanticException(e);
-    } finally {
-      IOUtils.closeStream(outStream);
-    }
+    Utils.writeOutput(values, ctx.getResFile(), conf);
   }
 
   private ReplicationSpec getNewReplicationSpec() throws SemanticException {
@@ -1327,14 +1217,11 @@ public class ReplicationSemanticAnalyzer extends BaseSemanticAnalyzer {
       SemanticAnalyzer.VALUES_TMP_TABLE_NAME_PREFIX.toLowerCase();
 
   static Iterable<String> removeValuesTemporaryTables(List<String> tableNames) {
-    List<String> allTables = new ArrayList<>(tableNames);
-    CollectionUtils.filter(allTables, new Predicate() {
-      @Override
-      public boolean evaluate(Object tableName) {
-        return !tableName.toString().toLowerCase().startsWith(TMP_TABLE_PREFIX);
-      }
-    });
-    return allTables;
+    return Collections2.filter(tableNames,
+        tableName -> {
+          assert tableName != null;
+          return !tableName.toLowerCase().startsWith(TMP_TABLE_PREFIX);
+        });
   }
 
   private Iterable<? extends String> matchesDb(String dbPattern) throws HiveException {

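removeValuesTemporaryTables now filters with Guava's Collections2.filter and a Java 8 lambda instead of mutating a copied list with commons-collections. A self-contained example of the same call shape (the table names and prefix constant are made up for illustration):

    import com.google.common.collect.Collections2;
    import java.util.Arrays;
    import java.util.Collection;
    import java.util.List;

    public class FilterTempTables {
      private static final String TMP_TABLE_PREFIX = "values__tmp__table_";

      public static void main(String[] args) {
        List<String> tables = Arrays.asList("values__tmp__table_1", "ptned", "unptned");
        // returns a filtered view; the input list is not modified
        Collection<String> kept = Collections2.filter(tables,
            name -> !name.toLowerCase().startsWith(TMP_TABLE_PREFIX));
        System.out.println(kept); // [ptned, unptned]
      }
    }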
http://git-wip-us.apache.org/repos/asf/hive/blob/9e9356b5/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/DumpType.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/DumpType.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/DumpType.java
new file mode 100644
index 0000000..b1df5a3
--- /dev/null
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/DumpType.java
@@ -0,0 +1,45 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.ql.parse.repl;
+
+public enum DumpType {
+  BOOTSTRAP("BOOTSTRAP"),
+  INCREMENTAL("INCREMENTAL"),
+  EVENT_CREATE_TABLE("EVENT_CREATE_TABLE"),
+  EVENT_ADD_PARTITION("EVENT_ADD_PARTITION"),
+  EVENT_DROP_TABLE("EVENT_DROP_TABLE"),
+  EVENT_DROP_PARTITION("EVENT_DROP_PARTITION"),
+  EVENT_ALTER_TABLE("EVENT_ALTER_TABLE"),
+  EVENT_RENAME_TABLE("EVENT_RENAME_TABLE"),
+  EVENT_TRUNCATE_TABLE("EVENT_TRUNCATE_TABLE"),
+  EVENT_ALTER_PARTITION("EVENT_ALTER_PARTITION"),
+  EVENT_RENAME_PARTITION("EVENT_RENAME_PARTITION"),
+  EVENT_TRUNCATE_PARTITION("EVENT_TRUNCATE_PARTITION"),
+  EVENT_INSERT("EVENT_INSERT"),
+  EVENT_UNKNOWN("EVENT_UNKNOWN");
+
+  String type = null;
+  DumpType(String type) {
+    this.type = type;
+  }
+
+  @Override
+  public String toString(){
+    return type;
+  }
+}

http://git-wip-us.apache.org/repos/asf/hive/blob/9e9356b5/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/BootStrapReplicationSpecFunction.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/BootStrapReplicationSpecFunction.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/BootStrapReplicationSpecFunction.java
new file mode 100644
index 0000000..ae37c73
--- /dev/null
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/BootStrapReplicationSpecFunction.java
@@ -0,0 +1,54 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.ql.parse.repl.dump;
+
+import org.apache.hadoop.hive.ql.metadata.Hive;
+import org.apache.hadoop.hive.ql.metadata.HiveException;
+import org.apache.hadoop.hive.ql.parse.ReplicationSpec;
+import org.apache.hadoop.hive.ql.parse.SemanticException;
+
+class BootStrapReplicationSpecFunction implements HiveWrapper.Tuple.Function<ReplicationSpec> {
+  private final Hive db;
+
+  BootStrapReplicationSpecFunction(Hive db) {
+    this.db = db;
+  }
+
+  @Override
+  public ReplicationSpec fromMetaStore() throws HiveException {
+    try {
+      ReplicationSpec replicationSpec =
+          new ReplicationSpec(
+              true,
+              false,
+              "replv2",
+              "will-be-set",
+              false,
+              true,
+              false
+          );
+      long currentNotificationId = db.getMSC()
+          .getCurrentNotificationEventId().getEventId();
+      replicationSpec.setCurrentReplicationState(String.valueOf(currentNotificationId));
+      return replicationSpec;
+    } catch (Exception e) {
+      throw new SemanticException(e);
+      // TODO : simple wrap & rethrow for now, clean up with error codes
+    }
+  }
+}

http://git-wip-us.apache.org/repos/asf/hive/blob/9e9356b5/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/DBSerializer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/DBSerializer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/DBSerializer.java
deleted file mode 100644
index 40770de..0000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/DBSerializer.java
+++ /dev/null
@@ -1,54 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hive.ql.parse.repl.dump;
-
-import org.apache.hadoop.hive.metastore.api.Database;
-import org.apache.hadoop.hive.ql.ErrorMsg;
-import org.apache.hadoop.hive.ql.parse.ReplicationSpec;
-import org.apache.hadoop.hive.ql.parse.SemanticException;
-import org.apache.thrift.TException;
-import org.apache.thrift.TSerializer;
-import org.apache.thrift.protocol.TJSONProtocol;
-
-import java.io.IOException;
-
-public class DBSerializer implements JsonWriter.Serializer {
-  private final Database dbObject;
-
-  public DBSerializer(Database dbObject) {
-    this.dbObject = dbObject;
-  }
-
-  @Override
-  public void writeTo(JsonWriter writer, ReplicationSpec additionalPropertiesProvider)
-      throws SemanticException, IOException {
-    dbObject.putToParameters(
-        ReplicationSpec.KEY.CURR_STATE_ID.toString(),
-        additionalPropertiesProvider.getCurrentReplicationState()
-    );
-    TSerializer serializer = new TSerializer(new TJSONProtocol.Factory());
-    try {
-      String value = serializer.toString(dbObject, "UTF-8");
-      writer.jsonGenerator.writeStringField("db", value);
-    } catch (TException e) {
-      throw new SemanticException(ErrorMsg.ERROR_SERIALIZE_METASTORE.getMsg(), e);
-    }
-  }
-}
-
-

http://git-wip-us.apache.org/repos/asf/hive/blob/9e9356b5/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/FunctionSerializer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/FunctionSerializer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/FunctionSerializer.java
deleted file mode 100644
index 6b03766..0000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/FunctionSerializer.java
+++ /dev/null
@@ -1,48 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hive.ql.parse.repl.dump;
-
-import org.apache.hadoop.hive.metastore.api.Function;
-import org.apache.hadoop.hive.ql.ErrorMsg;
-import org.apache.hadoop.hive.ql.parse.ReplicationSpec;
-import org.apache.hadoop.hive.ql.parse.SemanticException;
-import org.apache.thrift.TException;
-import org.apache.thrift.TSerializer;
-import org.apache.thrift.protocol.TJSONProtocol;
-
-import java.io.IOException;
-
-public class FunctionSerializer implements JsonWriter.Serializer {
-  private Function function;
-
-  public FunctionSerializer(Function function) {
-    this.function = function;
-  }
-
-  @Override
-  public void writeTo(JsonWriter writer, ReplicationSpec additionalPropertiesProvider)
-      throws SemanticException, IOException {
-    TSerializer serializer = new TSerializer(new TJSONProtocol.Factory());
-    try {
-      writer.jsonGenerator
-          .writeStringField("function", serializer.toString(function, "UTF-8"));
-    } catch (TException e) {
-      throw new SemanticException(ErrorMsg.ERROR_SERIALIZE_METASTORE.getMsg(), e);
-    }
-  }
-}

http://git-wip-us.apache.org/repos/asf/hive/blob/9e9356b5/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/HiveWrapper.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/HiveWrapper.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/HiveWrapper.java
new file mode 100644
index 0000000..1dcaec2
--- /dev/null
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/HiveWrapper.java
@@ -0,0 +1,73 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.ql.parse.repl.dump;
+
+import org.apache.hadoop.hive.metastore.api.Database;
+import org.apache.hadoop.hive.ql.metadata.Hive;
+import org.apache.hadoop.hive.ql.metadata.HiveException;
+import org.apache.hadoop.hive.ql.parse.ReplicationSpec;
+
+/**
+ * This wrapper enforces an ordering guarantee: the current replication id must be
+ * read from the db before any object (table, function, partition, etc.) is fetched
+ * from the metastore. Centralizing that ordering here keeps every call site
+ * consistent; a usage sketch follows this file's diff.
+ */
+
+public class HiveWrapper {
+  private final Hive db;
+  private final String dbName;
+  private final BootStrapReplicationSpecFunction functionForSpec;
+
+  public HiveWrapper(Hive db, String dbName) {
+    this.dbName = dbName;
+    this.db = db;
+    this.functionForSpec = new BootStrapReplicationSpecFunction(db);
+  }
+
+  public Tuple<org.apache.hadoop.hive.metastore.api.Function> function(final String name)
+      throws HiveException {
+    return new Tuple<>(functionForSpec, () -> db.getFunction(dbName, name));
+  }
+
+  public Tuple<Database> database() throws HiveException {
+    return new Tuple<>(functionForSpec, () -> db.getDatabase(dbName));
+  }
+
+  public static class Tuple<T> {
+
+    interface Function<T> {
+      T fromMetaStore() throws HiveException;
+    }
+
+    public final ReplicationSpec replicationSpec;
+    public final T object;
+
+    /**
+     * The ReplicationSpec must be obtained before the object is queried from the Hive
+     * metastore, because creating the spec captures the latest event id for replication.
+     * We must not miss any events, so replaying some events during incremental load is
+     * acceptable in order to reach a consistent state of the warehouse.
+     */
+    Tuple(Function<ReplicationSpec> replicationSpecFunction,
+        Function<T> functionForObject) throws HiveException {
+      this.replicationSpec = replicationSpecFunction.fromMetaStore();
+      this.object = functionForObject.fromMetaStore();
+    }
+  }
+}
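As the Javadoc above describes, HiveWrapper.Tuple resolves the ReplicationSpec before it fetches the metastore object, so any caller going through the wrapper gets the required ordering automatically. Below is a minimal usage sketch, not part of the patch; the class and method names are illustrative, and a live Hive handle plus database name are assumed.

    import org.apache.hadoop.hive.metastore.api.Database;
    import org.apache.hadoop.hive.ql.metadata.Hive;
    import org.apache.hadoop.hive.ql.metadata.HiveException;
    import org.apache.hadoop.hive.ql.parse.ReplicationSpec;
    import org.apache.hadoop.hive.ql.parse.repl.dump.HiveWrapper;

    // Illustrative caller only; not part of the patch.
    class HiveWrapperUsageSketch {
      static void dumpDatabaseMetadata(Hive db, String dbName) throws HiveException {
        // The Tuple constructor resolves the ReplicationSpec first, then the Database
        // object, so the captured event id can never lag behind the object it describes.
        HiveWrapper.Tuple<Database> tuple = new HiveWrapper(db, dbName).database();
        ReplicationSpec spec = tuple.replicationSpec;
        Database database = tuple.object;
        // ... hand spec and database off to the dump serializers ...
      }
    }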

http://git-wip-us.apache.org/repos/asf/hive/blob/9e9356b5/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/JsonWriter.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/JsonWriter.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/JsonWriter.java
deleted file mode 100644
index 1aa1195..0000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/JsonWriter.java
+++ /dev/null
@@ -1,54 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hive.ql.parse.repl.dump;
-
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hive.ql.parse.ReplicationSpec;
-import org.apache.hadoop.hive.ql.parse.SemanticException;
-import org.codehaus.jackson.JsonFactory;
-import org.codehaus.jackson.JsonGenerator;
-
-import java.io.Closeable;
-import java.io.IOException;
-import java.io.OutputStream;
-
-import static org.apache.hadoop.hive.ql.parse.EximUtil.METADATA_FORMAT_VERSION;
-
-public class JsonWriter implements Closeable {
-
-  final JsonGenerator jsonGenerator;
-
-  public JsonWriter(FileSystem fs, Path writePath) throws IOException {
-    OutputStream out = fs.create(writePath);
-    jsonGenerator = new JsonFactory().createJsonGenerator(out);
-    jsonGenerator.writeStartObject();
-    jsonGenerator.writeStringField("version", METADATA_FORMAT_VERSION);
-  }
-
-  @Override
-  public void close() throws IOException {
-    jsonGenerator.writeEndObject();
-    jsonGenerator.close();
-  }
-
-  public interface Serializer {
-    void writeTo(JsonWriter writer, ReplicationSpec additionalPropertiesProvider) throws
-        SemanticException, IOException;
-  }
-}

http://git-wip-us.apache.org/repos/asf/hive/blob/9e9356b5/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/PartitionSerializer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/PartitionSerializer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/PartitionSerializer.java
deleted file mode 100644
index 313d108..0000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/PartitionSerializer.java
+++ /dev/null
@@ -1,64 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hive.ql.parse.repl.dump;
-
-import org.apache.hadoop.hive.metastore.api.Partition;
-import org.apache.hadoop.hive.ql.ErrorMsg;
-import org.apache.hadoop.hive.ql.parse.ReplicationSpec;
-import org.apache.hadoop.hive.ql.parse.SemanticException;
-import org.apache.thrift.TException;
-import org.apache.thrift.TSerializer;
-import org.apache.thrift.protocol.TJSONProtocol;
-
-import java.io.IOException;
-import java.util.Map;
-
-class PartitionSerializer implements JsonWriter.Serializer {
-  private Partition partition;
-
-  PartitionSerializer(Partition partition) {
-    this.partition = partition;
-  }
-
-  @Override
-  public void writeTo(JsonWriter writer, ReplicationSpec additionalPropertiesProvider)
-      throws SemanticException, IOException {
-    TSerializer serializer = new TSerializer(new TJSONProtocol.Factory());
-    try {
-      if (additionalPropertiesProvider.isInReplicationScope()) {
-        partition.putToParameters(
-            ReplicationSpec.KEY.CURR_STATE_ID.toString(),
-            additionalPropertiesProvider.getCurrentReplicationState());
-        if (isPartitionExternal()) {
-          // Replication destination will not be external
-          partition.putToParameters("EXTERNAL", "FALSE");
-        }
-      }
-      writer.jsonGenerator.writeString(serializer.toString(partition, "UTF-8"));
-      writer.jsonGenerator.flush();
-    } catch (TException e) {
-      throw new SemanticException(ErrorMsg.ERROR_SERIALIZE_METASTORE.getMsg(), e);
-    }
-  }
-
-  private boolean isPartitionExternal() {
-    Map<String, String> params = partition.getParameters();
-    return params.containsKey("EXTERNAL")
-        && params.get("EXTERNAL").equalsIgnoreCase("TRUE");
-  }
-}

http://git-wip-us.apache.org/repos/asf/hive/blob/9e9356b5/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/ReplicationSpecSerializer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/ReplicationSpecSerializer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/ReplicationSpecSerializer.java
deleted file mode 100644
index d88a553..0000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/ReplicationSpecSerializer.java
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hive.ql.parse.repl.dump;
-
-import org.apache.hadoop.hive.ql.parse.ReplicationSpec;
-import org.apache.hadoop.hive.ql.parse.SemanticException;
-
-import java.io.IOException;
-
-public class ReplicationSpecSerializer implements JsonWriter.Serializer {
-  @Override
-  public void writeTo(JsonWriter writer, ReplicationSpec additionalPropertiesProvider)
-      throws SemanticException, IOException {
-    for (ReplicationSpec.KEY key : ReplicationSpec.KEY.values()) {
-      String value = additionalPropertiesProvider.get(key);
-      if (value != null) {
-        writer.jsonGenerator.writeStringField(key.toString(), value);
-      }
-    }
-  }
-}

http://git-wip-us.apache.org/repos/asf/hive/blob/9e9356b5/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/TableSerializer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/TableSerializer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/TableSerializer.java
deleted file mode 100644
index a2e258f..0000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/TableSerializer.java
+++ /dev/null
@@ -1,113 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hive.ql.parse.repl.dump;
-
-import org.apache.hadoop.hive.metastore.TableType;
-import org.apache.hadoop.hive.metastore.api.Table;
-import org.apache.hadoop.hive.ql.ErrorMsg;
-import org.apache.hadoop.hive.ql.metadata.Partition;
-import org.apache.hadoop.hive.ql.parse.ReplicationSpec;
-import org.apache.hadoop.hive.ql.parse.SemanticException;
-import org.apache.thrift.TException;
-import org.apache.thrift.TSerializer;
-import org.apache.thrift.protocol.TJSONProtocol;
-
-import java.io.IOException;
-import java.util.Map;
-
-public class TableSerializer implements JsonWriter.Serializer {
-  private final org.apache.hadoop.hive.ql.metadata.Table tableHandle;
-  private final Iterable<Partition> partitions;
-
-  public TableSerializer(org.apache.hadoop.hive.ql.metadata.Table tableHandle,
-      Iterable<Partition> partitions) {
-    this.tableHandle = tableHandle;
-    this.partitions = partitions;
-  }
-
-  @Override
-  public void writeTo(JsonWriter writer, ReplicationSpec additionalPropertiesProvider)
-      throws SemanticException, IOException {
-    if (cannotReplicateTable(additionalPropertiesProvider)) {
-      return;
-    }
-
-    Table tTable = tableHandle.getTTable();
-    tTable = addPropertiesToTable(tTable, additionalPropertiesProvider);
-    try {
-      TSerializer serializer = new TSerializer(new TJSONProtocol.Factory());
-      writer.jsonGenerator
-          .writeStringField("table", serializer.toString(tTable, "UTF-8"));
-      writer.jsonGenerator.writeFieldName("partitions");
-      writePartitions(writer, additionalPropertiesProvider);
-    } catch (TException e) {
-      throw new SemanticException(ErrorMsg.ERROR_SERIALIZE_METASTORE.getMsg(), e);
-    }
-  }
-
-  private boolean cannotReplicateTable(ReplicationSpec additionalPropertiesProvider) {
-    return tableHandle == null || additionalPropertiesProvider.isNoop();
-  }
-
-  private Table addPropertiesToTable(Table table, ReplicationSpec additionalPropertiesProvider)
-      throws SemanticException, IOException {
-    if (additionalPropertiesProvider.isInReplicationScope()) {
-      table.putToParameters(
-            ReplicationSpec.KEY.CURR_STATE_ID.toString(),
-            additionalPropertiesProvider.getCurrentReplicationState());
-      if (isExternalTable(table)) {
-          // Replication destination will not be external - override if set
-        table.putToParameters("EXTERNAL", "FALSE");
-        }
-      if (isExternalTableType(table)) {
-          // Replication dest will not be external - override if set
-        table.setTableType(TableType.MANAGED_TABLE.toString());
-        }
-    } else {
-      // ReplicationSpec.KEY scopeKey = ReplicationSpec.KEY.REPL_SCOPE;
-      // write(out, ",\""+ scopeKey.toString() +"\":\"" + replicationSpec.get(scopeKey) + "\"");
-      // TODO: if we want to be explicit about this dump not being a replication dump, we can
-      // uncomment this else section, but currently unnneeded. Will require a lot of golden file
-      // regen if we do so.
-    }
-    return table;
-  }
-
-  private boolean isExternalTableType(org.apache.hadoop.hive.metastore.api.Table table) {
-    return table.isSetTableType()
-        && table.getTableType().equalsIgnoreCase(TableType.EXTERNAL_TABLE.toString());
-  }
-
-  private boolean isExternalTable(org.apache.hadoop.hive.metastore.api.Table table) {
-    Map<String, String> params = table.getParameters();
-    return params.containsKey("EXTERNAL")
-        && params.get("EXTERNAL").equalsIgnoreCase("TRUE");
-  }
-
-  private void writePartitions(JsonWriter writer, ReplicationSpec additionalPropertiesProvider)
-      throws SemanticException, IOException {
-    writer.jsonGenerator.writeStartArray();
-    if (partitions != null) {
-      for (org.apache.hadoop.hive.ql.metadata.Partition partition : partitions) {
-        new PartitionSerializer(partition.getTPartition())
-            .writeTo(writer, additionalPropertiesProvider);
-      }
-    }
-    writer.jsonGenerator.writeEndArray();
-  }
-}

http://git-wip-us.apache.org/repos/asf/hive/blob/9e9356b5/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/Utils.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/Utils.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/Utils.java
new file mode 100644
index 0000000..846b6f5
--- /dev/null
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/Utils.java
@@ -0,0 +1,50 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.ql.parse.repl.dump;
+
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.ql.exec.Utilities;
+import org.apache.hadoop.hive.ql.parse.SemanticException;
+import org.apache.hadoop.io.IOUtils;
+
+import java.io.DataOutputStream;
+import java.io.IOException;
+import java.util.List;
+
+public class Utils {
+  public static void writeOutput(List<String> values, Path outputFile, HiveConf hiveConf)
+      throws SemanticException {
+    DataOutputStream outStream = null;
+    try {
+      FileSystem fs = outputFile.getFileSystem(hiveConf);
+      outStream = fs.create(outputFile);
+      outStream.writeBytes((values.get(0) == null ? Utilities.nullStringOutput : values.get(0)));
+      for (int i = 1; i < values.size(); i++) {
+        outStream.write(Utilities.tabCode);
+        outStream.writeBytes((values.get(i) == null ? Utilities.nullStringOutput : values.get(i)));
+      }
+      outStream.write(Utilities.newLineCode);
+    } catch (IOException e) {
+      throw new SemanticException(e);
+    } finally {
+      IOUtils.closeStream(outStream);
+    }
+  }
+}
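Utils.writeOutput renders the given values as a single tab-separated line, substituting Utilities.nullStringOutput for nulls and closing the stream even when the write fails. A minimal call-site sketch follows, not part of the patch; the HiveConf, output path, and values are illustrative assumptions.

    import java.util.Arrays;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hive.conf.HiveConf;
    import org.apache.hadoop.hive.ql.parse.SemanticException;
    import org.apache.hadoop.hive.ql.parse.repl.dump.Utils;

    // Illustrative caller only; not part of the patch.
    class WriteOutputSketch {
      static void writeStatusLine(HiveConf conf) throws SemanticException {
        // Writes one tab-separated line at the given path; the null entry is
        // rendered as Utilities.nullStringOutput.
        Utils.writeOutput(Arrays.asList("repl_db", "10", null),
            new Path("/tmp/repl_dump_status"), conf);
      }
    }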

http://git-wip-us.apache.org/repos/asf/hive/blob/9e9356b5/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/VersionCompatibleSerializer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/VersionCompatibleSerializer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/VersionCompatibleSerializer.java
deleted file mode 100644
index 3ebc803..0000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/VersionCompatibleSerializer.java
+++ /dev/null
@@ -1,37 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hive.ql.parse.repl.dump;
-
-import org.apache.hadoop.hive.ql.parse.ReplicationSpec;
-import org.apache.hadoop.hive.ql.parse.SemanticException;
-
-import java.io.IOException;
-
-import static org.apache.hadoop.hive.ql.parse.EximUtil.METADATA_FORMAT_FORWARD_COMPATIBLE_VERSION;
-
-/**
- * This is not used as of now as the conditional which lead to its usage is always false
- * hence we have removed the conditional and the usage of this class, but might be required in future.
- */
-public class VersionCompatibleSerializer implements JsonWriter.Serializer {
-  @Override
-  public void writeTo(JsonWriter writer, ReplicationSpec additionalPropertiesProvider)
-      throws SemanticException, IOException {
-    writer.jsonGenerator.writeStringField("fcversion", METADATA_FORMAT_FORWARD_COMPATIBLE_VERSION);
-  }
-}

http://git-wip-us.apache.org/repos/asf/hive/blob/9e9356b5/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/io/DBSerializer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/io/DBSerializer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/io/DBSerializer.java
new file mode 100644
index 0000000..15b7e13
--- /dev/null
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/io/DBSerializer.java
@@ -0,0 +1,55 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.ql.parse.repl.dump.io;
+
+import org.apache.hadoop.hive.metastore.api.Database;
+import org.apache.hadoop.hive.ql.ErrorMsg;
+import org.apache.hadoop.hive.ql.parse.ReplicationSpec;
+import org.apache.hadoop.hive.ql.parse.SemanticException;
+import org.apache.thrift.TException;
+import org.apache.thrift.TSerializer;
+import org.apache.thrift.protocol.TJSONProtocol;
+
+import java.io.IOException;
+
+public class DBSerializer implements JsonWriter.Serializer {
+  public static final String FIELD_NAME = "db";
+  private final Database dbObject;
+
+  public DBSerializer(Database dbObject) {
+    this.dbObject = dbObject;
+  }
+
+  @Override
+  public void writeTo(JsonWriter writer, ReplicationSpec additionalPropertiesProvider)
+      throws SemanticException, IOException {
+    dbObject.putToParameters(
+        ReplicationSpec.KEY.CURR_STATE_ID.toString(),
+        additionalPropertiesProvider.getCurrentReplicationState()
+    );
+    TSerializer serializer = new TSerializer(new TJSONProtocol.Factory());
+    try {
+      String value = serializer.toString(dbObject, UTF_8);
+      writer.jsonGenerator.writeStringField(FIELD_NAME, value);
+    } catch (TException e) {
+      throw new SemanticException(ErrorMsg.ERROR_SERIALIZE_METASTORE.getMsg(), e);
+    }
+  }
+}
+
+

http://git-wip-us.apache.org/repos/asf/hive/blob/9e9356b5/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/io/FunctionSerializer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/io/FunctionSerializer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/io/FunctionSerializer.java
new file mode 100644
index 0000000..5dc7023
--- /dev/null
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/io/FunctionSerializer.java
@@ -0,0 +1,49 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.ql.parse.repl.dump.io;
+
+import org.apache.hadoop.hive.metastore.api.Function;
+import org.apache.hadoop.hive.ql.ErrorMsg;
+import org.apache.hadoop.hive.ql.parse.ReplicationSpec;
+import org.apache.hadoop.hive.ql.parse.SemanticException;
+import org.apache.thrift.TException;
+import org.apache.thrift.TSerializer;
+import org.apache.thrift.protocol.TJSONProtocol;
+
+import java.io.IOException;
+
+public class FunctionSerializer implements JsonWriter.Serializer {
+  public static final String FIELD_NAME = "function";
+  private Function function;
+
+  public FunctionSerializer(Function function) {
+    this.function = function;
+  }
+
+  @Override
+  public void writeTo(JsonWriter writer, ReplicationSpec additionalPropertiesProvider)
+      throws SemanticException, IOException {
+    TSerializer serializer = new TSerializer(new TJSONProtocol.Factory());
+    try {
+      writer.jsonGenerator
+          .writeStringField(FIELD_NAME, serializer.toString(function, UTF_8));
+    } catch (TException e) {
+      throw new SemanticException(ErrorMsg.ERROR_SERIALIZE_METASTORE.getMsg(), e);
+    }
+  }
+}

http://git-wip-us.apache.org/repos/asf/hive/blob/9e9356b5/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/io/JsonWriter.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/io/JsonWriter.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/io/JsonWriter.java
new file mode 100644
index 0000000..e20be68
--- /dev/null
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/io/JsonWriter.java
@@ -0,0 +1,55 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.ql.parse.repl.dump.io;
+
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hive.ql.parse.ReplicationSpec;
+import org.apache.hadoop.hive.ql.parse.SemanticException;
+import org.codehaus.jackson.JsonFactory;
+import org.codehaus.jackson.JsonGenerator;
+
+import java.io.Closeable;
+import java.io.IOException;
+import java.io.OutputStream;
+
+import static org.apache.hadoop.hive.ql.parse.EximUtil.METADATA_FORMAT_VERSION;
+
+public class JsonWriter implements Closeable {
+
+  final JsonGenerator jsonGenerator;
+
+  public JsonWriter(FileSystem fs, Path writePath) throws IOException {
+    OutputStream out = fs.create(writePath);
+    jsonGenerator = new JsonFactory().createJsonGenerator(out);
+    jsonGenerator.writeStartObject();
+    jsonGenerator.writeStringField("version", METADATA_FORMAT_VERSION);
+  }
+
+  @Override
+  public void close() throws IOException {
+    jsonGenerator.writeEndObject();
+    jsonGenerator.close();
+  }
+
+  public interface Serializer {
+    String UTF_8 = "UTF-8";
+    void writeTo(JsonWriter writer, ReplicationSpec additionalPropertiesProvider) throws
+        SemanticException, IOException;
+  }
+}
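Taken together, the relocated JsonWriter opens the metadata stream and writes the version field, each Serializer then appends its own field, and close() terminates the JSON object. A minimal composition sketch, not part of the patch; the file system, path, Database object, and ReplicationSpec are assumed to be supplied by the caller.

    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hive.metastore.api.Database;
    import org.apache.hadoop.hive.ql.parse.ReplicationSpec;
    import org.apache.hadoop.hive.ql.parse.SemanticException;
    import org.apache.hadoop.hive.ql.parse.repl.dump.io.DBSerializer;
    import org.apache.hadoop.hive.ql.parse.repl.dump.io.JsonWriter;

    import java.io.IOException;

    // Illustrative caller only; not part of the patch.
    class DumpMetadataSketch {
      static void writeDbMetadata(FileSystem fs, Path metadataPath, Database dbObject,
          ReplicationSpec replicationSpec) throws IOException, SemanticException {
        // try-with-resources closes the JSON object and the underlying stream.
        try (JsonWriter writer = new JsonWriter(fs, metadataPath)) {
          // Appends the "db" field (a Thrift-JSON encoding of the Database object)
          // stamped with the current replication state id.
          new DBSerializer(dbObject).writeTo(writer, replicationSpec);
        }
      }
    }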


[47/50] [abbrv] hive git commit: HIVE-14671 : merge master into hive-14535 (Wei Zheng)

Posted by we...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/1ceaf357/metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp
----------------------------------------------------------------------
diff --cc metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp
index 96c2b0b,e3725a5..cfa2e49
--- a/metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp
+++ b/metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp
@@@ -18058,26 -18056,25 +18102,26 @@@ uint32_t GetNextWriteIdRequest::write(:
    return xfer;
  }
  
 -void swap(GetAllFunctionsResponse &a, GetAllFunctionsResponse &b) {
 +void swap(GetNextWriteIdRequest &a, GetNextWriteIdRequest &b) {
    using ::std::swap;
 -  swap(a.functions, b.functions);
 -  swap(a.__isset, b.__isset);
 +  swap(a.dbName, b.dbName);
 +  swap(a.tblName, b.tblName);
  }
  
- GetNextWriteIdRequest::GetNextWriteIdRequest(const GetNextWriteIdRequest& other747) {
 -GetAllFunctionsResponse::GetAllFunctionsResponse(const GetAllFunctionsResponse& other752) {
 -  functions = other752.functions;
 -  __isset = other752.__isset;
++GetNextWriteIdRequest::GetNextWriteIdRequest(const GetNextWriteIdRequest& other746) {
++  dbName = other746.dbName;
++  tblName = other746.tblName;
+ }
 -GetAllFunctionsResponse& GetAllFunctionsResponse::operator=(const GetAllFunctionsResponse& other753) {
 -  functions = other753.functions;
 -  __isset = other753.__isset;
++GetNextWriteIdRequest& GetNextWriteIdRequest::operator=(const GetNextWriteIdRequest& other747) {
 +  dbName = other747.dbName;
 +  tblName = other747.tblName;
- }
- GetNextWriteIdRequest& GetNextWriteIdRequest::operator=(const GetNextWriteIdRequest& other748) {
-   dbName = other748.dbName;
-   tblName = other748.tblName;
    return *this;
  }
 -void GetAllFunctionsResponse::printTo(std::ostream& out) const {
 +void GetNextWriteIdRequest::printTo(std::ostream& out) const {
    using ::apache::thrift::to_string;
 -  out << "GetAllFunctionsResponse(";
 -  out << "functions="; (__isset.functions ? (out << to_string(functions)) : (out << "<null>"));
 +  out << "GetNextWriteIdRequest(";
 +  out << "dbName=" << to_string(dbName);
 +  out << ", " << "tblName=" << to_string(tblName);
    out << ")";
  }
  
@@@ -18148,22 -18167,22 +18192,22 @@@ uint32_t GetNextWriteIdResult::write(::
    return xfer;
  }
  
 -void swap(ClientCapabilities &a, ClientCapabilities &b) {
 +void swap(GetNextWriteIdResult &a, GetNextWriteIdResult &b) {
    using ::std::swap;
 -  swap(a.values, b.values);
 +  swap(a.writeId, b.writeId);
  }
  
- GetNextWriteIdResult::GetNextWriteIdResult(const GetNextWriteIdResult& other749) {
-   writeId = other749.writeId;
 -ClientCapabilities::ClientCapabilities(const ClientCapabilities& other761) {
 -  values = other761.values;
++GetNextWriteIdResult::GetNextWriteIdResult(const GetNextWriteIdResult& other748) {
++  writeId = other748.writeId;
  }
- GetNextWriteIdResult& GetNextWriteIdResult::operator=(const GetNextWriteIdResult& other750) {
-   writeId = other750.writeId;
 -ClientCapabilities& ClientCapabilities::operator=(const ClientCapabilities& other762) {
 -  values = other762.values;
++GetNextWriteIdResult& GetNextWriteIdResult::operator=(const GetNextWriteIdResult& other749) {
++  writeId = other749.writeId;
    return *this;
  }
 -void ClientCapabilities::printTo(std::ostream& out) const {
 +void GetNextWriteIdResult::printTo(std::ostream& out) const {
    using ::apache::thrift::to_string;
 -  out << "ClientCapabilities(";
 -  out << "values=" << to_string(values);
 +  out << "GetNextWriteIdResult(";
 +  out << "writeId=" << to_string(writeId);
    out << ")";
  }
  
@@@ -18295,30 -18294,29 +18339,30 @@@ void swap(FinalizeWriteIdRequest &a, Fi
    using ::std::swap;
    swap(a.dbName, b.dbName);
    swap(a.tblName, b.tblName);
 -  swap(a.capabilities, b.capabilities);
 -  swap(a.__isset, b.__isset);
 +  swap(a.writeId, b.writeId);
 +  swap(a.commit, b.commit);
  }
  
- FinalizeWriteIdRequest::FinalizeWriteIdRequest(const FinalizeWriteIdRequest& other751) {
 -GetTableRequest::GetTableRequest(const GetTableRequest& other763) {
 -  dbName = other763.dbName;
 -  tblName = other763.tblName;
 -  capabilities = other763.capabilities;
 -  __isset = other763.__isset;
++FinalizeWriteIdRequest::FinalizeWriteIdRequest(const FinalizeWriteIdRequest& other750) {
++  dbName = other750.dbName;
++  tblName = other750.tblName;
++  writeId = other750.writeId;
++  commit = other750.commit;
+ }
 -GetTableRequest& GetTableRequest::operator=(const GetTableRequest& other764) {
 -  dbName = other764.dbName;
 -  tblName = other764.tblName;
 -  capabilities = other764.capabilities;
 -  __isset = other764.__isset;
++FinalizeWriteIdRequest& FinalizeWriteIdRequest::operator=(const FinalizeWriteIdRequest& other751) {
 +  dbName = other751.dbName;
 +  tblName = other751.tblName;
 +  writeId = other751.writeId;
 +  commit = other751.commit;
- }
- FinalizeWriteIdRequest& FinalizeWriteIdRequest::operator=(const FinalizeWriteIdRequest& other752) {
-   dbName = other752.dbName;
-   tblName = other752.tblName;
-   writeId = other752.writeId;
-   commit = other752.commit;
    return *this;
  }
 -void GetTableRequest::printTo(std::ostream& out) const {
 +void FinalizeWriteIdRequest::printTo(std::ostream& out) const {
    using ::apache::thrift::to_string;
 -  out << "GetTableRequest(";
 +  out << "FinalizeWriteIdRequest(";
    out << "dbName=" << to_string(dbName);
    out << ", " << "tblName=" << to_string(tblName);
 -  out << ", " << "capabilities="; (__isset.capabilities ? (out << to_string(capabilities)) : (out << "<null>"));
 +  out << ", " << "writeId=" << to_string(writeId);
 +  out << ", " << "commit=" << to_string(commit);
    out << ")";
  }
  
@@@ -18365,22 -18387,22 +18409,22 @@@ uint32_t FinalizeWriteIdResult::write(:
    return xfer;
  }
  
 -void swap(GetTableResult &a, GetTableResult &b) {
 +void swap(FinalizeWriteIdResult &a, FinalizeWriteIdResult &b) {
    using ::std::swap;
 -  swap(a.table, b.table);
 +  (void) a;
 +  (void) b;
  }
  
- FinalizeWriteIdResult::FinalizeWriteIdResult(const FinalizeWriteIdResult& other753) {
-   (void) other753;
 -GetTableResult::GetTableResult(const GetTableResult& other765) {
 -  table = other765.table;
++FinalizeWriteIdResult::FinalizeWriteIdResult(const FinalizeWriteIdResult& other752) {
++  (void) other752;
  }
- FinalizeWriteIdResult& FinalizeWriteIdResult::operator=(const FinalizeWriteIdResult& other754) {
-   (void) other754;
 -GetTableResult& GetTableResult::operator=(const GetTableResult& other766) {
 -  table = other766.table;
++FinalizeWriteIdResult& FinalizeWriteIdResult::operator=(const FinalizeWriteIdResult& other753) {
++  (void) other753;
    return *this;
  }
 -void GetTableResult::printTo(std::ostream& out) const {
 +void FinalizeWriteIdResult::printTo(std::ostream& out) const {
    using ::apache::thrift::to_string;
 -  out << "GetTableResult(";
 -  out << "table=" << to_string(table);
 +  out << "FinalizeWriteIdResult(";
    out << ")";
  }
  
@@@ -18489,30 -18529,33 +18533,30 @@@ uint32_t HeartbeatWriteIdRequest::write
    return xfer;
  }
  
 -void swap(GetTablesRequest &a, GetTablesRequest &b) {
 +void swap(HeartbeatWriteIdRequest &a, HeartbeatWriteIdRequest &b) {
    using ::std::swap;
    swap(a.dbName, b.dbName);
 -  swap(a.tblNames, b.tblNames);
 -  swap(a.capabilities, b.capabilities);
 -  swap(a.__isset, b.__isset);
 +  swap(a.tblName, b.tblName);
 +  swap(a.writeId, b.writeId);
  }
  
- HeartbeatWriteIdRequest::HeartbeatWriteIdRequest(const HeartbeatWriteIdRequest& other755) {
 -GetTablesRequest::GetTablesRequest(const GetTablesRequest& other773) {
 -  dbName = other773.dbName;
 -  tblNames = other773.tblNames;
 -  capabilities = other773.capabilities;
 -  __isset = other773.__isset;
++HeartbeatWriteIdRequest::HeartbeatWriteIdRequest(const HeartbeatWriteIdRequest& other754) {
++  dbName = other754.dbName;
++  tblName = other754.tblName;
++  writeId = other754.writeId;
+ }
 -GetTablesRequest& GetTablesRequest::operator=(const GetTablesRequest& other774) {
 -  dbName = other774.dbName;
 -  tblNames = other774.tblNames;
 -  capabilities = other774.capabilities;
 -  __isset = other774.__isset;
++HeartbeatWriteIdRequest& HeartbeatWriteIdRequest::operator=(const HeartbeatWriteIdRequest& other755) {
 +  dbName = other755.dbName;
 +  tblName = other755.tblName;
 +  writeId = other755.writeId;
- }
- HeartbeatWriteIdRequest& HeartbeatWriteIdRequest::operator=(const HeartbeatWriteIdRequest& other756) {
-   dbName = other756.dbName;
-   tblName = other756.tblName;
-   writeId = other756.writeId;
    return *this;
  }
 -void GetTablesRequest::printTo(std::ostream& out) const {
 +void HeartbeatWriteIdRequest::printTo(std::ostream& out) const {
    using ::apache::thrift::to_string;
 -  out << "GetTablesRequest(";
 +  out << "HeartbeatWriteIdRequest(";
    out << "dbName=" << to_string(dbName);
 -  out << ", " << "tblNames="; (__isset.tblNames ? (out << to_string(tblNames)) : (out << "<null>"));
 -  out << ", " << "capabilities="; (__isset.capabilities ? (out << to_string(capabilities)) : (out << "<null>"));
 +  out << ", " << "tblName=" << to_string(tblName);
 +  out << ", " << "writeId=" << to_string(writeId);
    out << ")";
  }
  
@@@ -18521,947 -18564,11 +18565,947 @@@ HeartbeatWriteIdResult::~HeartbeatWrite
  }
  
  
 -void GetTablesResult::__set_tables(const std::vector<Table> & val) {
 -  this->tables = val;
 -}
 -
 -uint32_t GetTablesResult::read(::apache::thrift::protocol::TProtocol* iprot) {
 +uint32_t HeartbeatWriteIdResult::read(::apache::thrift::protocol::TProtocol* iprot) {
 +
 +  apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
 +  uint32_t xfer = 0;
 +  std::string fname;
 +  ::apache::thrift::protocol::TType ftype;
 +  int16_t fid;
 +
 +  xfer += iprot->readStructBegin(fname);
 +
 +  using ::apache::thrift::protocol::TProtocolException;
 +
 +
 +  while (true)
 +  {
 +    xfer += iprot->readFieldBegin(fname, ftype, fid);
 +    if (ftype == ::apache::thrift::protocol::T_STOP) {
 +      break;
 +    }
 +    xfer += iprot->skip(ftype);
 +    xfer += iprot->readFieldEnd();
 +  }
 +
 +  xfer += iprot->readStructEnd();
 +
 +  return xfer;
 +}
 +
 +uint32_t HeartbeatWriteIdResult::write(::apache::thrift::protocol::TProtocol* oprot) const {
 +  uint32_t xfer = 0;
 +  apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot);
 +  xfer += oprot->writeStructBegin("HeartbeatWriteIdResult");
 +
 +  xfer += oprot->writeFieldStop();
 +  xfer += oprot->writeStructEnd();
 +  return xfer;
 +}
 +
 +void swap(HeartbeatWriteIdResult &a, HeartbeatWriteIdResult &b) {
 +  using ::std::swap;
 +  (void) a;
 +  (void) b;
 +}
 +
- HeartbeatWriteIdResult::HeartbeatWriteIdResult(const HeartbeatWriteIdResult& other757) {
-   (void) other757;
++HeartbeatWriteIdResult::HeartbeatWriteIdResult(const HeartbeatWriteIdResult& other756) {
++  (void) other756;
 +}
- HeartbeatWriteIdResult& HeartbeatWriteIdResult::operator=(const HeartbeatWriteIdResult& other758) {
-   (void) other758;
++HeartbeatWriteIdResult& HeartbeatWriteIdResult::operator=(const HeartbeatWriteIdResult& other757) {
++  (void) other757;
 +  return *this;
 +}
 +void HeartbeatWriteIdResult::printTo(std::ostream& out) const {
 +  using ::apache::thrift::to_string;
 +  out << "HeartbeatWriteIdResult(";
 +  out << ")";
 +}
 +
 +
 +GetValidWriteIdsRequest::~GetValidWriteIdsRequest() throw() {
 +}
 +
 +
 +void GetValidWriteIdsRequest::__set_dbName(const std::string& val) {
 +  this->dbName = val;
 +}
 +
 +void GetValidWriteIdsRequest::__set_tblName(const std::string& val) {
 +  this->tblName = val;
 +}
 +
 +uint32_t GetValidWriteIdsRequest::read(::apache::thrift::protocol::TProtocol* iprot) {
 +
 +  apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
 +  uint32_t xfer = 0;
 +  std::string fname;
 +  ::apache::thrift::protocol::TType ftype;
 +  int16_t fid;
 +
 +  xfer += iprot->readStructBegin(fname);
 +
 +  using ::apache::thrift::protocol::TProtocolException;
 +
 +  bool isset_dbName = false;
 +  bool isset_tblName = false;
 +
 +  while (true)
 +  {
 +    xfer += iprot->readFieldBegin(fname, ftype, fid);
 +    if (ftype == ::apache::thrift::protocol::T_STOP) {
 +      break;
 +    }
 +    switch (fid)
 +    {
 +      case 1:
 +        if (ftype == ::apache::thrift::protocol::T_STRING) {
 +          xfer += iprot->readString(this->dbName);
 +          isset_dbName = true;
 +        } else {
 +          xfer += iprot->skip(ftype);
 +        }
 +        break;
 +      case 2:
 +        if (ftype == ::apache::thrift::protocol::T_STRING) {
 +          xfer += iprot->readString(this->tblName);
 +          isset_tblName = true;
 +        } else {
 +          xfer += iprot->skip(ftype);
 +        }
 +        break;
 +      default:
 +        xfer += iprot->skip(ftype);
 +        break;
 +    }
 +    xfer += iprot->readFieldEnd();
 +  }
 +
 +  xfer += iprot->readStructEnd();
 +
 +  if (!isset_dbName)
 +    throw TProtocolException(TProtocolException::INVALID_DATA);
 +  if (!isset_tblName)
 +    throw TProtocolException(TProtocolException::INVALID_DATA);
 +  return xfer;
 +}
 +
 +uint32_t GetValidWriteIdsRequest::write(::apache::thrift::protocol::TProtocol* oprot) const {
 +  uint32_t xfer = 0;
 +  apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot);
 +  xfer += oprot->writeStructBegin("GetValidWriteIdsRequest");
 +
 +  xfer += oprot->writeFieldBegin("dbName", ::apache::thrift::protocol::T_STRING, 1);
 +  xfer += oprot->writeString(this->dbName);
 +  xfer += oprot->writeFieldEnd();
 +
 +  xfer += oprot->writeFieldBegin("tblName", ::apache::thrift::protocol::T_STRING, 2);
 +  xfer += oprot->writeString(this->tblName);
 +  xfer += oprot->writeFieldEnd();
 +
 +  xfer += oprot->writeFieldStop();
 +  xfer += oprot->writeStructEnd();
 +  return xfer;
 +}
 +
 +void swap(GetValidWriteIdsRequest &a, GetValidWriteIdsRequest &b) {
 +  using ::std::swap;
 +  swap(a.dbName, b.dbName);
 +  swap(a.tblName, b.tblName);
 +}
 +
- GetValidWriteIdsRequest::GetValidWriteIdsRequest(const GetValidWriteIdsRequest& other759) {
++GetValidWriteIdsRequest::GetValidWriteIdsRequest(const GetValidWriteIdsRequest& other758) {
++  dbName = other758.dbName;
++  tblName = other758.tblName;
++}
++GetValidWriteIdsRequest& GetValidWriteIdsRequest::operator=(const GetValidWriteIdsRequest& other759) {
 +  dbName = other759.dbName;
 +  tblName = other759.tblName;
- }
- GetValidWriteIdsRequest& GetValidWriteIdsRequest::operator=(const GetValidWriteIdsRequest& other760) {
-   dbName = other760.dbName;
-   tblName = other760.tblName;
 +  return *this;
 +}
 +void GetValidWriteIdsRequest::printTo(std::ostream& out) const {
 +  using ::apache::thrift::to_string;
 +  out << "GetValidWriteIdsRequest(";
 +  out << "dbName=" << to_string(dbName);
 +  out << ", " << "tblName=" << to_string(tblName);
 +  out << ")";
 +}
 +
 +
 +GetValidWriteIdsResult::~GetValidWriteIdsResult() throw() {
 +}
 +
 +
 +void GetValidWriteIdsResult::__set_lowWatermarkId(const int64_t val) {
 +  this->lowWatermarkId = val;
 +}
 +
 +void GetValidWriteIdsResult::__set_highWatermarkId(const int64_t val) {
 +  this->highWatermarkId = val;
 +}
 +
 +void GetValidWriteIdsResult::__set_areIdsValid(const bool val) {
 +  this->areIdsValid = val;
 +__isset.areIdsValid = true;
 +}
 +
 +void GetValidWriteIdsResult::__set_ids(const std::vector<int64_t> & val) {
 +  this->ids = val;
 +__isset.ids = true;
 +}
 +
 +uint32_t GetValidWriteIdsResult::read(::apache::thrift::protocol::TProtocol* iprot) {
 +
 +  apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
 +  uint32_t xfer = 0;
 +  std::string fname;
 +  ::apache::thrift::protocol::TType ftype;
 +  int16_t fid;
 +
 +  xfer += iprot->readStructBegin(fname);
 +
 +  using ::apache::thrift::protocol::TProtocolException;
 +
 +  bool isset_lowWatermarkId = false;
 +  bool isset_highWatermarkId = false;
 +
 +  while (true)
 +  {
 +    xfer += iprot->readFieldBegin(fname, ftype, fid);
 +    if (ftype == ::apache::thrift::protocol::T_STOP) {
 +      break;
 +    }
 +    switch (fid)
 +    {
 +      case 1:
 +        if (ftype == ::apache::thrift::protocol::T_I64) {
 +          xfer += iprot->readI64(this->lowWatermarkId);
 +          isset_lowWatermarkId = true;
 +        } else {
 +          xfer += iprot->skip(ftype);
 +        }
 +        break;
 +      case 2:
 +        if (ftype == ::apache::thrift::protocol::T_I64) {
 +          xfer += iprot->readI64(this->highWatermarkId);
 +          isset_highWatermarkId = true;
 +        } else {
 +          xfer += iprot->skip(ftype);
 +        }
 +        break;
 +      case 3:
 +        if (ftype == ::apache::thrift::protocol::T_BOOL) {
 +          xfer += iprot->readBool(this->areIdsValid);
 +          this->__isset.areIdsValid = true;
 +        } else {
 +          xfer += iprot->skip(ftype);
 +        }
 +        break;
 +      case 4:
 +        if (ftype == ::apache::thrift::protocol::T_LIST) {
 +          {
 +            this->ids.clear();
-             uint32_t _size761;
-             ::apache::thrift::protocol::TType _etype764;
-             xfer += iprot->readListBegin(_etype764, _size761);
-             this->ids.resize(_size761);
-             uint32_t _i765;
-             for (_i765 = 0; _i765 < _size761; ++_i765)
++            uint32_t _size760;
++            ::apache::thrift::protocol::TType _etype763;
++            xfer += iprot->readListBegin(_etype763, _size760);
++            this->ids.resize(_size760);
++            uint32_t _i764;
++            for (_i764 = 0; _i764 < _size760; ++_i764)
 +            {
-               xfer += iprot->readI64(this->ids[_i765]);
++              xfer += iprot->readI64(this->ids[_i764]);
 +            }
 +            xfer += iprot->readListEnd();
 +          }
 +          this->__isset.ids = true;
 +        } else {
 +          xfer += iprot->skip(ftype);
 +        }
 +        break;
 +      default:
 +        xfer += iprot->skip(ftype);
 +        break;
 +    }
 +    xfer += iprot->readFieldEnd();
 +  }
 +
 +  xfer += iprot->readStructEnd();
 +
 +  if (!isset_lowWatermarkId)
 +    throw TProtocolException(TProtocolException::INVALID_DATA);
 +  if (!isset_highWatermarkId)
 +    throw TProtocolException(TProtocolException::INVALID_DATA);
 +  return xfer;
 +}
 +
 +uint32_t GetValidWriteIdsResult::write(::apache::thrift::protocol::TProtocol* oprot) const {
 +  uint32_t xfer = 0;
 +  apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot);
 +  xfer += oprot->writeStructBegin("GetValidWriteIdsResult");
 +
 +  xfer += oprot->writeFieldBegin("lowWatermarkId", ::apache::thrift::protocol::T_I64, 1);
 +  xfer += oprot->writeI64(this->lowWatermarkId);
 +  xfer += oprot->writeFieldEnd();
 +
 +  xfer += oprot->writeFieldBegin("highWatermarkId", ::apache::thrift::protocol::T_I64, 2);
 +  xfer += oprot->writeI64(this->highWatermarkId);
 +  xfer += oprot->writeFieldEnd();
 +
 +  if (this->__isset.areIdsValid) {
 +    xfer += oprot->writeFieldBegin("areIdsValid", ::apache::thrift::protocol::T_BOOL, 3);
 +    xfer += oprot->writeBool(this->areIdsValid);
 +    xfer += oprot->writeFieldEnd();
 +  }
 +  if (this->__isset.ids) {
 +    xfer += oprot->writeFieldBegin("ids", ::apache::thrift::protocol::T_LIST, 4);
 +    {
 +      xfer += oprot->writeListBegin(::apache::thrift::protocol::T_I64, static_cast<uint32_t>(this->ids.size()));
-       std::vector<int64_t> ::const_iterator _iter766;
-       for (_iter766 = this->ids.begin(); _iter766 != this->ids.end(); ++_iter766)
++      std::vector<int64_t> ::const_iterator _iter765;
++      for (_iter765 = this->ids.begin(); _iter765 != this->ids.end(); ++_iter765)
 +      {
-         xfer += oprot->writeI64((*_iter766));
++        xfer += oprot->writeI64((*_iter765));
 +      }
 +      xfer += oprot->writeListEnd();
 +    }
 +    xfer += oprot->writeFieldEnd();
 +  }
 +  xfer += oprot->writeFieldStop();
 +  xfer += oprot->writeStructEnd();
 +  return xfer;
 +}
 +
 +void swap(GetValidWriteIdsResult &a, GetValidWriteIdsResult &b) {
 +  using ::std::swap;
 +  swap(a.lowWatermarkId, b.lowWatermarkId);
 +  swap(a.highWatermarkId, b.highWatermarkId);
 +  swap(a.areIdsValid, b.areIdsValid);
 +  swap(a.ids, b.ids);
 +  swap(a.__isset, b.__isset);
 +}
 +
- GetValidWriteIdsResult::GetValidWriteIdsResult(const GetValidWriteIdsResult& other767) {
++GetValidWriteIdsResult::GetValidWriteIdsResult(const GetValidWriteIdsResult& other766) {
++  lowWatermarkId = other766.lowWatermarkId;
++  highWatermarkId = other766.highWatermarkId;
++  areIdsValid = other766.areIdsValid;
++  ids = other766.ids;
++  __isset = other766.__isset;
++}
++GetValidWriteIdsResult& GetValidWriteIdsResult::operator=(const GetValidWriteIdsResult& other767) {
 +  lowWatermarkId = other767.lowWatermarkId;
 +  highWatermarkId = other767.highWatermarkId;
 +  areIdsValid = other767.areIdsValid;
 +  ids = other767.ids;
 +  __isset = other767.__isset;
- }
- GetValidWriteIdsResult& GetValidWriteIdsResult::operator=(const GetValidWriteIdsResult& other768) {
-   lowWatermarkId = other768.lowWatermarkId;
-   highWatermarkId = other768.highWatermarkId;
-   areIdsValid = other768.areIdsValid;
-   ids = other768.ids;
-   __isset = other768.__isset;
 +  return *this;
 +}
 +void GetValidWriteIdsResult::printTo(std::ostream& out) const {
 +  using ::apache::thrift::to_string;
 +  out << "GetValidWriteIdsResult(";
 +  out << "lowWatermarkId=" << to_string(lowWatermarkId);
 +  out << ", " << "highWatermarkId=" << to_string(highWatermarkId);
 +  out << ", " << "areIdsValid="; (__isset.areIdsValid ? (out << to_string(areIdsValid)) : (out << "<null>"));
 +  out << ", " << "ids="; (__isset.ids ? (out << to_string(ids)) : (out << "<null>"));
 +  out << ")";
 +}
 +
 +
 +GetAllFunctionsResponse::~GetAllFunctionsResponse() throw() {
 +}
 +
 +
 +void GetAllFunctionsResponse::__set_functions(const std::vector<Function> & val) {
 +  this->functions = val;
 +__isset.functions = true;
 +}
 +
 +uint32_t GetAllFunctionsResponse::read(::apache::thrift::protocol::TProtocol* iprot) {
 +
 +  apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
 +  uint32_t xfer = 0;
 +  std::string fname;
 +  ::apache::thrift::protocol::TType ftype;
 +  int16_t fid;
 +
 +  xfer += iprot->readStructBegin(fname);
 +
 +  using ::apache::thrift::protocol::TProtocolException;
 +
 +
 +  while (true)
 +  {
 +    xfer += iprot->readFieldBegin(fname, ftype, fid);
 +    if (ftype == ::apache::thrift::protocol::T_STOP) {
 +      break;
 +    }
 +    switch (fid)
 +    {
 +      case 1:
 +        if (ftype == ::apache::thrift::protocol::T_LIST) {
 +          {
 +            this->functions.clear();
-             uint32_t _size769;
-             ::apache::thrift::protocol::TType _etype772;
-             xfer += iprot->readListBegin(_etype772, _size769);
-             this->functions.resize(_size769);
-             uint32_t _i773;
-             for (_i773 = 0; _i773 < _size769; ++_i773)
++            uint32_t _size768;
++            ::apache::thrift::protocol::TType _etype771;
++            xfer += iprot->readListBegin(_etype771, _size768);
++            this->functions.resize(_size768);
++            uint32_t _i772;
++            for (_i772 = 0; _i772 < _size768; ++_i772)
 +            {
-               xfer += this->functions[_i773].read(iprot);
++              xfer += this->functions[_i772].read(iprot);
 +            }
 +            xfer += iprot->readListEnd();
 +          }
 +          this->__isset.functions = true;
 +        } else {
 +          xfer += iprot->skip(ftype);
 +        }
 +        break;
 +      default:
 +        xfer += iprot->skip(ftype);
 +        break;
 +    }
 +    xfer += iprot->readFieldEnd();
 +  }
 +
 +  xfer += iprot->readStructEnd();
 +
 +  return xfer;
 +}
 +
 +uint32_t GetAllFunctionsResponse::write(::apache::thrift::protocol::TProtocol* oprot) const {
 +  uint32_t xfer = 0;
 +  apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot);
 +  xfer += oprot->writeStructBegin("GetAllFunctionsResponse");
 +
 +  if (this->__isset.functions) {
 +    xfer += oprot->writeFieldBegin("functions", ::apache::thrift::protocol::T_LIST, 1);
 +    {
 +      xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->functions.size()));
-       std::vector<Function> ::const_iterator _iter774;
-       for (_iter774 = this->functions.begin(); _iter774 != this->functions.end(); ++_iter774)
++      std::vector<Function> ::const_iterator _iter773;
++      for (_iter773 = this->functions.begin(); _iter773 != this->functions.end(); ++_iter773)
 +      {
-         xfer += (*_iter774).write(oprot);
++        xfer += (*_iter773).write(oprot);
 +      }
 +      xfer += oprot->writeListEnd();
 +    }
 +    xfer += oprot->writeFieldEnd();
 +  }
 +  xfer += oprot->writeFieldStop();
 +  xfer += oprot->writeStructEnd();
 +  return xfer;
 +}
 +
 +void swap(GetAllFunctionsResponse &a, GetAllFunctionsResponse &b) {
 +  using ::std::swap;
 +  swap(a.functions, b.functions);
 +  swap(a.__isset, b.__isset);
 +}
 +
- GetAllFunctionsResponse::GetAllFunctionsResponse(const GetAllFunctionsResponse& other775) {
++GetAllFunctionsResponse::GetAllFunctionsResponse(const GetAllFunctionsResponse& other774) {
++  functions = other774.functions;
++  __isset = other774.__isset;
++}
++GetAllFunctionsResponse& GetAllFunctionsResponse::operator=(const GetAllFunctionsResponse& other775) {
 +  functions = other775.functions;
 +  __isset = other775.__isset;
- }
- GetAllFunctionsResponse& GetAllFunctionsResponse::operator=(const GetAllFunctionsResponse& other776) {
-   functions = other776.functions;
-   __isset = other776.__isset;
 +  return *this;
 +}
 +void GetAllFunctionsResponse::printTo(std::ostream& out) const {
 +  using ::apache::thrift::to_string;
 +  out << "GetAllFunctionsResponse(";
 +  out << "functions="; (__isset.functions ? (out << to_string(functions)) : (out << "<null>"));
 +  out << ")";
 +}
 +
 +
 +ClientCapabilities::~ClientCapabilities() throw() {
 +}
 +
 +
 +void ClientCapabilities::__set_values(const std::vector<ClientCapability::type> & val) {
 +  this->values = val;
 +}
 +
 +uint32_t ClientCapabilities::read(::apache::thrift::protocol::TProtocol* iprot) {
 +
 +  apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
 +  uint32_t xfer = 0;
 +  std::string fname;
 +  ::apache::thrift::protocol::TType ftype;
 +  int16_t fid;
 +
 +  xfer += iprot->readStructBegin(fname);
 +
 +  using ::apache::thrift::protocol::TProtocolException;
 +
 +  bool isset_values = false;
 +
 +  while (true)
 +  {
 +    xfer += iprot->readFieldBegin(fname, ftype, fid);
 +    if (ftype == ::apache::thrift::protocol::T_STOP) {
 +      break;
 +    }
 +    switch (fid)
 +    {
 +      case 1:
 +        if (ftype == ::apache::thrift::protocol::T_LIST) {
 +          {
 +            this->values.clear();
-             uint32_t _size777;
-             ::apache::thrift::protocol::TType _etype780;
-             xfer += iprot->readListBegin(_etype780, _size777);
-             this->values.resize(_size777);
-             uint32_t _i781;
-             for (_i781 = 0; _i781 < _size777; ++_i781)
++            uint32_t _size776;
++            ::apache::thrift::protocol::TType _etype779;
++            xfer += iprot->readListBegin(_etype779, _size776);
++            this->values.resize(_size776);
++            uint32_t _i780;
++            for (_i780 = 0; _i780 < _size776; ++_i780)
 +            {
-               int32_t ecast782;
-               xfer += iprot->readI32(ecast782);
-               this->values[_i781] = (ClientCapability::type)ecast782;
++              int32_t ecast781;
++              xfer += iprot->readI32(ecast781);
++              this->values[_i780] = (ClientCapability::type)ecast781;
 +            }
 +            xfer += iprot->readListEnd();
 +          }
 +          isset_values = true;
 +        } else {
 +          xfer += iprot->skip(ftype);
 +        }
 +        break;
 +      default:
 +        xfer += iprot->skip(ftype);
 +        break;
 +    }
 +    xfer += iprot->readFieldEnd();
 +  }
 +
 +  xfer += iprot->readStructEnd();
 +
 +  if (!isset_values)
 +    throw TProtocolException(TProtocolException::INVALID_DATA);
 +  return xfer;
 +}
 +
 +uint32_t ClientCapabilities::write(::apache::thrift::protocol::TProtocol* oprot) const {
 +  uint32_t xfer = 0;
 +  apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot);
 +  xfer += oprot->writeStructBegin("ClientCapabilities");
 +
 +  xfer += oprot->writeFieldBegin("values", ::apache::thrift::protocol::T_LIST, 1);
 +  {
 +    xfer += oprot->writeListBegin(::apache::thrift::protocol::T_I32, static_cast<uint32_t>(this->values.size()));
-     std::vector<ClientCapability::type> ::const_iterator _iter783;
-     for (_iter783 = this->values.begin(); _iter783 != this->values.end(); ++_iter783)
++    std::vector<ClientCapability::type> ::const_iterator _iter782;
++    for (_iter782 = this->values.begin(); _iter782 != this->values.end(); ++_iter782)
 +    {
-       xfer += oprot->writeI32((int32_t)(*_iter783));
++      xfer += oprot->writeI32((int32_t)(*_iter782));
 +    }
 +    xfer += oprot->writeListEnd();
 +  }
 +  xfer += oprot->writeFieldEnd();
 +
 +  xfer += oprot->writeFieldStop();
 +  xfer += oprot->writeStructEnd();
 +  return xfer;
 +}
 +
 +void swap(ClientCapabilities &a, ClientCapabilities &b) {
 +  using ::std::swap;
 +  swap(a.values, b.values);
 +}
 +
- ClientCapabilities::ClientCapabilities(const ClientCapabilities& other784) {
-   values = other784.values;
++ClientCapabilities::ClientCapabilities(const ClientCapabilities& other783) {
++  values = other783.values;
 +}
- ClientCapabilities& ClientCapabilities::operator=(const ClientCapabilities& other785) {
-   values = other785.values;
++ClientCapabilities& ClientCapabilities::operator=(const ClientCapabilities& other784) {
++  values = other784.values;
 +  return *this;
 +}
 +void ClientCapabilities::printTo(std::ostream& out) const {
 +  using ::apache::thrift::to_string;
 +  out << "ClientCapabilities(";
 +  out << "values=" << to_string(values);
 +  out << ")";
 +}
 +
 +
 +GetTableRequest::~GetTableRequest() throw() {
 +}
 +
 +
 +void GetTableRequest::__set_dbName(const std::string& val) {
 +  this->dbName = val;
 +}
 +
 +void GetTableRequest::__set_tblName(const std::string& val) {
 +  this->tblName = val;
 +}
 +
 +void GetTableRequest::__set_capabilities(const ClientCapabilities& val) {
 +  this->capabilities = val;
 +__isset.capabilities = true;
 +}
 +
 +uint32_t GetTableRequest::read(::apache::thrift::protocol::TProtocol* iprot) {
 +
 +  apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
 +  uint32_t xfer = 0;
 +  std::string fname;
 +  ::apache::thrift::protocol::TType ftype;
 +  int16_t fid;
 +
 +  xfer += iprot->readStructBegin(fname);
 +
 +  using ::apache::thrift::protocol::TProtocolException;
 +
 +  bool isset_dbName = false;
 +  bool isset_tblName = false;
 +
 +  while (true)
 +  {
 +    xfer += iprot->readFieldBegin(fname, ftype, fid);
 +    if (ftype == ::apache::thrift::protocol::T_STOP) {
 +      break;
 +    }
 +    switch (fid)
 +    {
 +      case 1:
 +        if (ftype == ::apache::thrift::protocol::T_STRING) {
 +          xfer += iprot->readString(this->dbName);
 +          isset_dbName = true;
 +        } else {
 +          xfer += iprot->skip(ftype);
 +        }
 +        break;
 +      case 2:
 +        if (ftype == ::apache::thrift::protocol::T_STRING) {
 +          xfer += iprot->readString(this->tblName);
 +          isset_tblName = true;
 +        } else {
 +          xfer += iprot->skip(ftype);
 +        }
 +        break;
 +      case 3:
 +        if (ftype == ::apache::thrift::protocol::T_STRUCT) {
 +          xfer += this->capabilities.read(iprot);
 +          this->__isset.capabilities = true;
 +        } else {
 +          xfer += iprot->skip(ftype);
 +        }
 +        break;
 +      default:
 +        xfer += iprot->skip(ftype);
 +        break;
 +    }
 +    xfer += iprot->readFieldEnd();
 +  }
 +
 +  xfer += iprot->readStructEnd();
 +
 +  if (!isset_dbName)
 +    throw TProtocolException(TProtocolException::INVALID_DATA);
 +  if (!isset_tblName)
 +    throw TProtocolException(TProtocolException::INVALID_DATA);
 +  return xfer;
 +}
 +
 +uint32_t GetTableRequest::write(::apache::thrift::protocol::TProtocol* oprot) const {
 +  uint32_t xfer = 0;
 +  apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot);
 +  xfer += oprot->writeStructBegin("GetTableRequest");
 +
 +  xfer += oprot->writeFieldBegin("dbName", ::apache::thrift::protocol::T_STRING, 1);
 +  xfer += oprot->writeString(this->dbName);
 +  xfer += oprot->writeFieldEnd();
 +
 +  xfer += oprot->writeFieldBegin("tblName", ::apache::thrift::protocol::T_STRING, 2);
 +  xfer += oprot->writeString(this->tblName);
 +  xfer += oprot->writeFieldEnd();
 +
 +  if (this->__isset.capabilities) {
 +    xfer += oprot->writeFieldBegin("capabilities", ::apache::thrift::protocol::T_STRUCT, 3);
 +    xfer += this->capabilities.write(oprot);
 +    xfer += oprot->writeFieldEnd();
 +  }
 +  xfer += oprot->writeFieldStop();
 +  xfer += oprot->writeStructEnd();
 +  return xfer;
 +}
 +
 +void swap(GetTableRequest &a, GetTableRequest &b) {
 +  using ::std::swap;
 +  swap(a.dbName, b.dbName);
 +  swap(a.tblName, b.tblName);
 +  swap(a.capabilities, b.capabilities);
 +  swap(a.__isset, b.__isset);
 +}
 +
- GetTableRequest::GetTableRequest(const GetTableRequest& other786) {
++GetTableRequest::GetTableRequest(const GetTableRequest& other785) {
++  dbName = other785.dbName;
++  tblName = other785.tblName;
++  capabilities = other785.capabilities;
++  __isset = other785.__isset;
++}
++GetTableRequest& GetTableRequest::operator=(const GetTableRequest& other786) {
 +  dbName = other786.dbName;
 +  tblName = other786.tblName;
 +  capabilities = other786.capabilities;
 +  __isset = other786.__isset;
- }
- GetTableRequest& GetTableRequest::operator=(const GetTableRequest& other787) {
-   dbName = other787.dbName;
-   tblName = other787.tblName;
-   capabilities = other787.capabilities;
-   __isset = other787.__isset;
 +  return *this;
 +}
 +void GetTableRequest::printTo(std::ostream& out) const {
 +  using ::apache::thrift::to_string;
 +  out << "GetTableRequest(";
 +  out << "dbName=" << to_string(dbName);
 +  out << ", " << "tblName=" << to_string(tblName);
 +  out << ", " << "capabilities="; (__isset.capabilities ? (out << to_string(capabilities)) : (out << "<null>"));
 +  out << ")";
 +}
 +
 +
 +GetTableResult::~GetTableResult() throw() {
 +}
 +
 +
 +void GetTableResult::__set_table(const Table& val) {
 +  this->table = val;
 +}
 +
 +uint32_t GetTableResult::read(::apache::thrift::protocol::TProtocol* iprot) {
 +
 +  apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
 +  uint32_t xfer = 0;
 +  std::string fname;
 +  ::apache::thrift::protocol::TType ftype;
 +  int16_t fid;
 +
 +  xfer += iprot->readStructBegin(fname);
 +
 +  using ::apache::thrift::protocol::TProtocolException;
 +
 +  bool isset_table = false;
 +
 +  while (true)
 +  {
 +    xfer += iprot->readFieldBegin(fname, ftype, fid);
 +    if (ftype == ::apache::thrift::protocol::T_STOP) {
 +      break;
 +    }
 +    switch (fid)
 +    {
 +      case 1:
 +        if (ftype == ::apache::thrift::protocol::T_STRUCT) {
 +          xfer += this->table.read(iprot);
 +          isset_table = true;
 +        } else {
 +          xfer += iprot->skip(ftype);
 +        }
 +        break;
 +      default:
 +        xfer += iprot->skip(ftype);
 +        break;
 +    }
 +    xfer += iprot->readFieldEnd();
 +  }
 +
 +  xfer += iprot->readStructEnd();
 +
 +  if (!isset_table)
 +    throw TProtocolException(TProtocolException::INVALID_DATA);
 +  return xfer;
 +}
 +
 +uint32_t GetTableResult::write(::apache::thrift::protocol::TProtocol* oprot) const {
 +  uint32_t xfer = 0;
 +  apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot);
 +  xfer += oprot->writeStructBegin("GetTableResult");
 +
 +  xfer += oprot->writeFieldBegin("table", ::apache::thrift::protocol::T_STRUCT, 1);
 +  xfer += this->table.write(oprot);
 +  xfer += oprot->writeFieldEnd();
 +
 +  xfer += oprot->writeFieldStop();
 +  xfer += oprot->writeStructEnd();
 +  return xfer;
 +}
 +
 +void swap(GetTableResult &a, GetTableResult &b) {
 +  using ::std::swap;
 +  swap(a.table, b.table);
 +}
 +
- GetTableResult::GetTableResult(const GetTableResult& other788) {
-   table = other788.table;
++GetTableResult::GetTableResult(const GetTableResult& other787) {
++  table = other787.table;
 +}
- GetTableResult& GetTableResult::operator=(const GetTableResult& other789) {
-   table = other789.table;
++GetTableResult& GetTableResult::operator=(const GetTableResult& other788) {
++  table = other788.table;
 +  return *this;
 +}
 +void GetTableResult::printTo(std::ostream& out) const {
 +  using ::apache::thrift::to_string;
 +  out << "GetTableResult(";
 +  out << "table=" << to_string(table);
 +  out << ")";
 +}
 +
 +
 +GetTablesRequest::~GetTablesRequest() throw() {
 +}
 +
 +
 +void GetTablesRequest::__set_dbName(const std::string& val) {
 +  this->dbName = val;
 +}
 +
 +void GetTablesRequest::__set_tblNames(const std::vector<std::string> & val) {
 +  this->tblNames = val;
 +__isset.tblNames = true;
 +}
 +
 +void GetTablesRequest::__set_capabilities(const ClientCapabilities& val) {
 +  this->capabilities = val;
 +__isset.capabilities = true;
 +}
 +
 +uint32_t GetTablesRequest::read(::apache::thrift::protocol::TProtocol* iprot) {
 +
 +  apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
 +  uint32_t xfer = 0;
 +  std::string fname;
 +  ::apache::thrift::protocol::TType ftype;
 +  int16_t fid;
 +
 +  xfer += iprot->readStructBegin(fname);
 +
 +  using ::apache::thrift::protocol::TProtocolException;
 +
 +  bool isset_dbName = false;
 +
 +  while (true)
 +  {
 +    xfer += iprot->readFieldBegin(fname, ftype, fid);
 +    if (ftype == ::apache::thrift::protocol::T_STOP) {
 +      break;
 +    }
 +    switch (fid)
 +    {
 +      case 1:
 +        if (ftype == ::apache::thrift::protocol::T_STRING) {
 +          xfer += iprot->readString(this->dbName);
 +          isset_dbName = true;
 +        } else {
 +          xfer += iprot->skip(ftype);
 +        }
 +        break;
 +      case 2:
 +        if (ftype == ::apache::thrift::protocol::T_LIST) {
 +          {
 +            this->tblNames.clear();
-             uint32_t _size790;
-             ::apache::thrift::protocol::TType _etype793;
-             xfer += iprot->readListBegin(_etype793, _size790);
-             this->tblNames.resize(_size790);
-             uint32_t _i794;
-             for (_i794 = 0; _i794 < _size790; ++_i794)
++            uint32_t _size789;
++            ::apache::thrift::protocol::TType _etype792;
++            xfer += iprot->readListBegin(_etype792, _size789);
++            this->tblNames.resize(_size789);
++            uint32_t _i793;
++            for (_i793 = 0; _i793 < _size789; ++_i793)
 +            {
-               xfer += iprot->readString(this->tblNames[_i794]);
++              xfer += iprot->readString(this->tblNames[_i793]);
 +            }
 +            xfer += iprot->readListEnd();
 +          }
 +          this->__isset.tblNames = true;
 +        } else {
 +          xfer += iprot->skip(ftype);
 +        }
 +        break;
 +      case 3:
 +        if (ftype == ::apache::thrift::protocol::T_STRUCT) {
 +          xfer += this->capabilities.read(iprot);
 +          this->__isset.capabilities = true;
 +        } else {
 +          xfer += iprot->skip(ftype);
 +        }
 +        break;
 +      default:
 +        xfer += iprot->skip(ftype);
 +        break;
 +    }
 +    xfer += iprot->readFieldEnd();
 +  }
 +
 +  xfer += iprot->readStructEnd();
 +
 +  if (!isset_dbName)
 +    throw TProtocolException(TProtocolException::INVALID_DATA);
 +  return xfer;
 +}
 +
 +uint32_t GetTablesRequest::write(::apache::thrift::protocol::TProtocol* oprot) const {
 +  uint32_t xfer = 0;
 +  apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot);
 +  xfer += oprot->writeStructBegin("GetTablesRequest");
 +
 +  xfer += oprot->writeFieldBegin("dbName", ::apache::thrift::protocol::T_STRING, 1);
 +  xfer += oprot->writeString(this->dbName);
 +  xfer += oprot->writeFieldEnd();
 +
 +  if (this->__isset.tblNames) {
 +    xfer += oprot->writeFieldBegin("tblNames", ::apache::thrift::protocol::T_LIST, 2);
 +    {
 +      xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->tblNames.size()));
-       std::vector<std::string> ::const_iterator _iter795;
-       for (_iter795 = this->tblNames.begin(); _iter795 != this->tblNames.end(); ++_iter795)
++      std::vector<std::string> ::const_iterator _iter794;
++      for (_iter794 = this->tblNames.begin(); _iter794 != this->tblNames.end(); ++_iter794)
 +      {
-         xfer += oprot->writeString((*_iter795));
++        xfer += oprot->writeString((*_iter794));
 +      }
 +      xfer += oprot->writeListEnd();
 +    }
 +    xfer += oprot->writeFieldEnd();
 +  }
 +  if (this->__isset.capabilities) {
 +    xfer += oprot->writeFieldBegin("capabilities", ::apache::thrift::protocol::T_STRUCT, 3);
 +    xfer += this->capabilities.write(oprot);
 +    xfer += oprot->writeFieldEnd();
 +  }
 +  xfer += oprot->writeFieldStop();
 +  xfer += oprot->writeStructEnd();
 +  return xfer;
 +}
 +
 +void swap(GetTablesRequest &a, GetTablesRequest &b) {
 +  using ::std::swap;
 +  swap(a.dbName, b.dbName);
 +  swap(a.tblNames, b.tblNames);
 +  swap(a.capabilities, b.capabilities);
 +  swap(a.__isset, b.__isset);
 +}
 +
- GetTablesRequest::GetTablesRequest(const GetTablesRequest& other796) {
++GetTablesRequest::GetTablesRequest(const GetTablesRequest& other795) {
++  dbName = other795.dbName;
++  tblNames = other795.tblNames;
++  capabilities = other795.capabilities;
++  __isset = other795.__isset;
++}
++GetTablesRequest& GetTablesRequest::operator=(const GetTablesRequest& other796) {
 +  dbName = other796.dbName;
 +  tblNames = other796.tblNames;
 +  capabilities = other796.capabilities;
 +  __isset = other796.__isset;
- }
- GetTablesRequest& GetTablesRequest::operator=(const GetTablesRequest& other797) {
-   dbName = other797.dbName;
-   tblNames = other797.tblNames;
-   capabilities = other797.capabilities;
-   __isset = other797.__isset;
 +  return *this;
 +}
 +void GetTablesRequest::printTo(std::ostream& out) const {
 +  using ::apache::thrift::to_string;
 +  out << "GetTablesRequest(";
 +  out << "dbName=" << to_string(dbName);
 +  out << ", " << "tblNames="; (__isset.tblNames ? (out << to_string(tblNames)) : (out << "<null>"));
 +  out << ", " << "capabilities="; (__isset.capabilities ? (out << to_string(capabilities)) : (out << "<null>"));
 +  out << ")";
 +}
 +
 +
 +GetTablesResult::~GetTablesResult() throw() {
 +}
 +
 +
 +void GetTablesResult::__set_tables(const std::vector<Table> & val) {
 +  this->tables = val;
 +}
 +
 +uint32_t GetTablesResult::read(::apache::thrift::protocol::TProtocol* iprot) {
  
    apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
    uint32_t xfer = 0;
@@@ -19487,14 -18594,14 +19531,14 @@@
          if (ftype == ::apache::thrift::protocol::T_LIST) {
            {
              this->tables.clear();
-             uint32_t _size798;
-             ::apache::thrift::protocol::TType _etype801;
-             xfer += iprot->readListBegin(_etype801, _size798);
-             this->tables.resize(_size798);
-             uint32_t _i802;
-             for (_i802 = 0; _i802 < _size798; ++_i802)
 -            uint32_t _size775;
 -            ::apache::thrift::protocol::TType _etype778;
 -            xfer += iprot->readListBegin(_etype778, _size775);
 -            this->tables.resize(_size775);
 -            uint32_t _i779;
 -            for (_i779 = 0; _i779 < _size775; ++_i779)
++            uint32_t _size797;
++            ::apache::thrift::protocol::TType _etype800;
++            xfer += iprot->readListBegin(_etype800, _size797);
++            this->tables.resize(_size797);
++            uint32_t _i801;
++            for (_i801 = 0; _i801 < _size797; ++_i801)
              {
-               xfer += this->tables[_i802].read(iprot);
 -              xfer += this->tables[_i779].read(iprot);
++              xfer += this->tables[_i801].read(iprot);
              }
              xfer += iprot->readListEnd();
            }
@@@ -19525,10 -18632,10 +19569,10 @@@ uint32_t GetTablesResult::write(::apach
    xfer += oprot->writeFieldBegin("tables", ::apache::thrift::protocol::T_LIST, 1);
    {
      xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->tables.size()));
-     std::vector<Table> ::const_iterator _iter803;
-     for (_iter803 = this->tables.begin(); _iter803 != this->tables.end(); ++_iter803)
 -    std::vector<Table> ::const_iterator _iter780;
 -    for (_iter780 = this->tables.begin(); _iter780 != this->tables.end(); ++_iter780)
++    std::vector<Table> ::const_iterator _iter802;
++    for (_iter802 = this->tables.begin(); _iter802 != this->tables.end(); ++_iter802)
      {
-       xfer += (*_iter803).write(oprot);
 -      xfer += (*_iter780).write(oprot);
++      xfer += (*_iter802).write(oprot);
      }
      xfer += oprot->writeListEnd();
    }
@@@ -19544,11 -18651,11 +19588,11 @@@ void swap(GetTablesResult &a, GetTables
    swap(a.tables, b.tables);
  }
  
- GetTablesResult::GetTablesResult(const GetTablesResult& other804) {
-   tables = other804.tables;
 -GetTablesResult::GetTablesResult(const GetTablesResult& other781) {
 -  tables = other781.tables;
++GetTablesResult::GetTablesResult(const GetTablesResult& other803) {
++  tables = other803.tables;
  }
- GetTablesResult& GetTablesResult::operator=(const GetTablesResult& other805) {
-   tables = other805.tables;
 -GetTablesResult& GetTablesResult::operator=(const GetTablesResult& other782) {
 -  tables = other782.tables;
++GetTablesResult& GetTablesResult::operator=(const GetTablesResult& other804) {
++  tables = other804.tables;
    return *this;
  }
  void GetTablesResult::printTo(std::ostream& out) const {
@@@ -19690,19 -18797,19 +19734,19 @@@ void swap(TableMeta &a, TableMeta &b) 
    swap(a.__isset, b.__isset);
  }
  
- TableMeta::TableMeta(const TableMeta& other806) {
 -TableMeta::TableMeta(const TableMeta& other783) {
 -  dbName = other783.dbName;
 -  tableName = other783.tableName;
 -  tableType = other783.tableType;
 -  comments = other783.comments;
 -  __isset = other783.__isset;
++TableMeta::TableMeta(const TableMeta& other805) {
++  dbName = other805.dbName;
++  tableName = other805.tableName;
++  tableType = other805.tableType;
++  comments = other805.comments;
++  __isset = other805.__isset;
+ }
 -TableMeta& TableMeta::operator=(const TableMeta& other784) {
 -  dbName = other784.dbName;
 -  tableName = other784.tableName;
 -  tableType = other784.tableType;
 -  comments = other784.comments;
 -  __isset = other784.__isset;
++TableMeta& TableMeta::operator=(const TableMeta& other806) {
 +  dbName = other806.dbName;
 +  tableName = other806.tableName;
 +  tableType = other806.tableType;
 +  comments = other806.comments;
 +  __isset = other806.__isset;
- }
- TableMeta& TableMeta::operator=(const TableMeta& other807) {
-   dbName = other807.dbName;
-   tableName = other807.tableName;
-   tableType = other807.tableType;
-   comments = other807.comments;
-   __isset = other807.__isset;
    return *this;
  }
  void TableMeta::printTo(std::ostream& out) const {
@@@ -19785,13 -18892,13 +19829,13 @@@ void swap(MetaException &a, MetaExcepti
    swap(a.__isset, b.__isset);
  }
  
- MetaException::MetaException(const MetaException& other808) : TException() {
 -MetaException::MetaException(const MetaException& other785) : TException() {
 -  message = other785.message;
 -  __isset = other785.__isset;
++MetaException::MetaException(const MetaException& other807) : TException() {
++  message = other807.message;
++  __isset = other807.__isset;
+ }
 -MetaException& MetaException::operator=(const MetaException& other786) {
 -  message = other786.message;
 -  __isset = other786.__isset;
++MetaException& MetaException::operator=(const MetaException& other808) {
 +  message = other808.message;
 +  __isset = other808.__isset;
- }
- MetaException& MetaException::operator=(const MetaException& other809) {
-   message = other809.message;
-   __isset = other809.__isset;
    return *this;
  }
  void MetaException::printTo(std::ostream& out) const {
@@@ -19882,13 -18989,13 +19926,13 @@@ void swap(UnknownTableException &a, Unk
    swap(a.__isset, b.__isset);
  }
  
- UnknownTableException::UnknownTableException(const UnknownTableException& other810) : TException() {
 -UnknownTableException::UnknownTableException(const UnknownTableException& other787) : TException() {
 -  message = other787.message;
 -  __isset = other787.__isset;
++UnknownTableException::UnknownTableException(const UnknownTableException& other809) : TException() {
++  message = other809.message;
++  __isset = other809.__isset;
+ }
 -UnknownTableException& UnknownTableException::operator=(const UnknownTableException& other788) {
 -  message = other788.message;
 -  __isset = other788.__isset;
++UnknownTableException& UnknownTableException::operator=(const UnknownTableException& other810) {
 +  message = other810.message;
 +  __isset = other810.__isset;
- }
- UnknownTableException& UnknownTableException::operator=(const UnknownTableException& other811) {
-   message = other811.message;
-   __isset = other811.__isset;
    return *this;
  }
  void UnknownTableException::printTo(std::ostream& out) const {
@@@ -19979,13 -19086,13 +20023,13 @@@ void swap(UnknownDBException &a, Unknow
    swap(a.__isset, b.__isset);
  }
  
- UnknownDBException::UnknownDBException(const UnknownDBException& other812) : TException() {
 -UnknownDBException::UnknownDBException(const UnknownDBException& other789) : TException() {
 -  message = other789.message;
 -  __isset = other789.__isset;
++UnknownDBException::UnknownDBException(const UnknownDBException& other811) : TException() {
++  message = other811.message;
++  __isset = other811.__isset;
+ }
 -UnknownDBException& UnknownDBException::operator=(const UnknownDBException& other790) {
 -  message = other790.message;
 -  __isset = other790.__isset;
++UnknownDBException& UnknownDBException::operator=(const UnknownDBException& other812) {
 +  message = other812.message;
 +  __isset = other812.__isset;
- }
- UnknownDBException& UnknownDBException::operator=(const UnknownDBException& other813) {
-   message = other813.message;
-   __isset = other813.__isset;
    return *this;
  }
  void UnknownDBException::printTo(std::ostream& out) const {
@@@ -20076,13 -19183,13 +20120,13 @@@ void swap(AlreadyExistsException &a, Al
    swap(a.__isset, b.__isset);
  }
  
- AlreadyExistsException::AlreadyExistsException(const AlreadyExistsException& other814) : TException() {
 -AlreadyExistsException::AlreadyExistsException(const AlreadyExistsException& other791) : TException() {
 -  message = other791.message;
 -  __isset = other791.__isset;
++AlreadyExistsException::AlreadyExistsException(const AlreadyExistsException& other813) : TException() {
++  message = other813.message;
++  __isset = other813.__isset;
+ }
 -AlreadyExistsException& AlreadyExistsException::operator=(const AlreadyExistsException& other792) {
 -  message = other792.message;
 -  __isset = other792.__isset;
++AlreadyExistsException& AlreadyExistsException::operator=(const AlreadyExistsException& other814) {
 +  message = other814.message;
 +  __isset = other814.__isset;
- }
- AlreadyExistsException& AlreadyExistsException::operator=(const AlreadyExistsException& other815) {
-   message = other815.message;
-   __isset = other815.__isset;
    return *this;
  }
  void AlreadyExistsException::printTo(std::ostream& out) const {
@@@ -20173,13 -19280,13 +20217,13 @@@ void swap(InvalidPartitionException &a
    swap(a.__isset, b.__isset);
  }
  
- InvalidPartitionException::InvalidPartitionException(const InvalidPartitionException& other816) : TException() {
 -InvalidPartitionException::InvalidPartitionException(const InvalidPartitionException& other793) : TException() {
 -  message = other793.message;
 -  __isset = other793.__isset;
++InvalidPartitionException::InvalidPartitionException(const InvalidPartitionException& other815) : TException() {
++  message = other815.message;
++  __isset = other815.__isset;
+ }
 -InvalidPartitionException& InvalidPartitionException::operator=(const InvalidPartitionException& other794) {
 -  message = other794.message;
 -  __isset = other794.__isset;
++InvalidPartitionException& InvalidPartitionException::operator=(const InvalidPartitionException& other816) {
 +  message = other816.message;
 +  __isset = other816.__isset;
- }
- InvalidPartitionException& InvalidPartitionException::operator=(const InvalidPartitionException& other817) {
-   message = other817.message;
-   __isset = other817.__isset;
    return *this;
  }
  void InvalidPartitionException::printTo(std::ostream& out) const {
@@@ -20270,13 -19377,13 +20314,13 @@@ void swap(UnknownPartitionException &a
    swap(a.__isset, b.__isset);
  }
  
- UnknownPartitionException::UnknownPartitionException(const UnknownPartitionException& other818) : TException() {
 -UnknownPartitionException::UnknownPartitionException(const UnknownPartitionException& other795) : TException() {
 -  message = other795.message;
 -  __isset = other795.__isset;
++UnknownPartitionException::UnknownPartitionException(const UnknownPartitionException& other817) : TException() {
++  message = other817.message;
++  __isset = other817.__isset;
+ }
 -UnknownPartitionException& UnknownPartitionException::operator=(const UnknownPartitionException& other796) {
 -  message = other796.message;
 -  __isset = other796.__isset;
++UnknownPartitionException& UnknownPartitionException::operator=(const UnknownPartitionException& other818) {
 +  message = other818.message;
 +  __isset = other818.__isset;
- }
- UnknownPartitionException& UnknownPartitionException::operator=(const UnknownPartitionException& other819) {
-   message = other819.message;
-   __isset = other819.__isset;
    return *this;
  }
  void UnknownPartitionException::printTo(std::ostream& out) const {
@@@ -20367,13 -19474,13 +20411,13 @@@ void swap(InvalidObjectException &a, In
    swap(a.__isset, b.__isset);
  }
  
- InvalidObjectException::InvalidObjectException(const InvalidObjectException& other820) : TException() {
 -InvalidObjectException::InvalidObjectException(const InvalidObjectException& other797) : TException() {
 -  message = other797.message;
 -  __isset = other797.__isset;
++InvalidObjectException::InvalidObjectException(const InvalidObjectException& other819) : TException() {
++  message = other819.message;
++  __isset = other819.__isset;
+ }
 -InvalidObjectException& InvalidObjectException::operator=(const InvalidObjectException& other798) {
 -  message = other798.message;
 -  __isset = other798.__isset;
++InvalidObjectException& InvalidObjectException::operator=(const InvalidObjectException& other820) {
 +  message = other820.message;
 +  __isset = other820.__isset;
- }
- InvalidObjectException& InvalidObjectException::operator=(const InvalidObjectException& other821) {
-   message = other821.message;
-   __isset = other821.__isset;
    return *this;
  }
  void InvalidObjectException::printTo(std::ostream& out) const {
@@@ -20464,13 -19571,13 +20508,13 @@@ void swap(NoSuchObjectException &a, NoS
    swap(a.__isset, b.__isset);
  }
  
- NoSuchObjectException::NoSuchObjectException(const NoSuchObjectException& other822) : TException() {
 -NoSuchObjectException::NoSuchObjectException(const NoSuchObjectException& other799) : TException() {
 -  message = other799.message;
 -  __isset = other799.__isset;
++NoSuchObjectException::NoSuchObjectException(const NoSuchObjectException& other821) : TException() {
++  message = other821.message;
++  __isset = other821.__isset;
+ }
 -NoSuchObjectException& NoSuchObjectException::operator=(const NoSuchObjectException& other800) {
 -  message = other800.message;
 -  __isset = other800.__isset;
++NoSuchObjectException& NoSuchObjectException::operator=(const NoSuchObjectException& other822) {
 +  message = other822.message;
 +  __isset = other822.__isset;
- }
- NoSuchObjectException& NoSuchObjectException::operator=(const NoSuchObjectException& other823) {
-   message = other823.message;
-   __isset = other823.__isset;
    return *this;
  }
  void NoSuchObjectException::printTo(std::ostream& out) const {
@@@ -20561,13 -19668,13 +20605,13 @@@ void swap(IndexAlreadyExistsException &
    swap(a.__isset, b.__isset);
  }
  
- IndexAlreadyExistsException::IndexAlreadyExistsException(const IndexAlreadyExistsException& other824) : TException() {
 -IndexAlreadyExistsException::IndexAlreadyExistsException(const IndexAlreadyExistsException& other801) : TException() {
 -  message = other801.message;
 -  __isset = other801.__isset;
++IndexAlreadyExistsException::IndexAlreadyExistsException(const IndexAlreadyExistsException& other823) : TException() {
++  message = other823.message;
++  __isset = other823.__isset;
+ }
 -IndexAlreadyExistsException& IndexAlreadyExistsException::operator=(const IndexAlreadyExistsException& other802) {
 -  message = other802.message;
 -  __isset = other802.__isset;
++IndexAlreadyExistsException& IndexAlreadyExistsException::operator=(const IndexAlreadyExistsException& other824) {
 +  message = other824.message;
 +  __isset = other824.__isset;
- }
- IndexAlreadyExistsException& IndexAlreadyExistsException::operator=(const IndexAlreadyExistsException& other825) {
-   message = other825.message;
-   __isset = other825.__isset;
    return *this;
  }
  void IndexAlreadyExistsException::printTo(std::ostream& out) const {
@@@ -20658,13 -19765,13 +20702,13 @@@ void swap(InvalidOperationException &a
    swap(a.__isset, b.__isset);
  }
  
- InvalidOperationException::InvalidOperationException(const InvalidOperationException& other826) : TException() {
 -InvalidOperationException::InvalidOperationException(const InvalidOperationException& other803) : TException() {
 -  message = other803.message;
 -  __isset = other803.__isset;
++InvalidOperationException::InvalidOperationException(const InvalidOperationException& other825) : TException() {
++  message = other825.message;
++  __isset = other825.__isset;
+ }
 -InvalidOperationException& InvalidOperationException::operator=(const InvalidOperationException& other804) {
 -  message = other804.message;
 -  __isset = other804.__isset;
++InvalidOperationException& InvalidOperationException::operator=(const InvalidOperationException& other826) {
 +  message = other826.message;
 +  __isset = other826.__isset;
- }
- InvalidOperationException& InvalidOperationException::operator=(const InvalidOperationException& other827) {
-   message = other827.message;
-   __isset = other827.__isset;
    return *this;
  }
  void InvalidOperationException::printTo(std::ostream& out) const {
@@@ -20755,13 -19862,13 +20799,13 @@@ void swap(ConfigValSecurityException &a
    swap(a.__isset, b.__isset);
  }
  
- ConfigValSecurityException::ConfigValSecurityException(const ConfigValSecurityException& other828) : TException() {
 -ConfigValSecurityException::ConfigValSecurityException(const ConfigValSecurityException& other805) : TException() {
 -  message = other805.message;
 -  __isset = other805.__isset;
++ConfigValSecurityException::ConfigValSecurityException(const ConfigValSecurityException& other827) : TException() {
++  message = other827.message;
++  __isset = other827.__isset;
+ }
 -ConfigValSecurityException& ConfigValSecurityException::operator=(const ConfigValSecurityException& other806) {
 -  message = other806.message;
 -  __isset = other806.__isset;
++ConfigValSecurityException& ConfigValSecurityException::operator=(const ConfigValSecurityException& other828) {
 +  message = other828.message;
 +  __isset = other828.__isset;
- }
- ConfigValSecurityException& ConfigValSecurityException::operator=(const ConfigValSecurityException& other829) {
-   message = other829.message;
-   __isset = other829.__isset;
    return *this;
  }
  void ConfigValSecurityException::printTo(std::ostream& out) const {
@@@ -20852,13 -19959,13 +20896,13 @@@ void swap(InvalidInputException &a, Inv
    swap(a.__isset, b.__isset);
  }
  
- InvalidInputException::InvalidInputException(const InvalidInputException& other830) : TException() {
 -InvalidInputException::InvalidInputException(const InvalidInputException& other807) : TException() {
 -  message = other807.message;
 -  __isset = other807.__isset;
++InvalidInputException::InvalidInputException(const InvalidInputException& other829) : TException() {
++  message = other829.message;
++  __isset = other829.__isset;
+ }
 -InvalidInputException& InvalidInputException::operator=(const InvalidInputException& other808) {
 -  message = other808.message;
 -  __isset = other808.__isset;
++InvalidInputException& InvalidInputException::operator=(const InvalidInputException& other830) {
 +  message = other830.message;
 +  __isset = other830.__isset;
- }
- InvalidInputException& InvalidInputException::operator=(const InvalidInputException& other831) {
-   message = other831.message;
-   __isset = other831.__isset;
    return *this;
  }
  void InvalidInputException::printTo(std::ostream& out) const {
@@@ -20949,13 -20056,13 +20993,13 @@@ void swap(NoSuchTxnException &a, NoSuch
    swap(a.__isset, b.__isset);
  }
  
- NoSuchTxnException::NoSuchTxnException(const NoSuchTxnException& other832) : TException() {
 -NoSuchTxnException::NoSuchTxnException(const NoSuchTxnException& other809) : TException() {
 -  message = other809.message;
 -  __isset = other809.__isset;
++NoSuchTxnException::NoSuchTxnException(const NoSuchTxnException& other831) : TException() {
++  message = other831.message;
++  __isset = other831.__isset;
+ }
 -NoSuchTxnException& NoSuchTxnException::operator=(const NoSuchTxnException& other810) {
 -  message = other810.message;
 -  __isset = other810.__isset;
++NoSuchTxnException& NoSuchTxnException::operator=(const NoSuchTxnException& other832) {
 +  message = other832.message;
 +  __isset = other832.__isset;
- }
- NoSuchTxnException& NoSuchTxnException::operator=(const NoSuchTxnException& other833) {
-   message = other833.message;
-   __isset = other833.__isset;
    return *this;
  }
  void NoSuchTxnException::printTo(std::ostream& out) const {
@@@ -21046,13 -20153,13 +21090,13 @@@ void swap(TxnAbortedException &a, TxnAb
    swap(a.__isset, b.__isset);
  }
  
- TxnAbortedException::TxnAbortedException(const TxnAbortedException& other834) : TException() {
 -TxnAbortedException::TxnAbortedException(const TxnAbortedException& other811) : TException() {
 -  message = other811.message;
 -  __isset = other811.__isset;
++TxnAbortedException::TxnAbortedException(const TxnAbortedException& other833) : TException() {
++  message = other833.message;
++  __isset = other833.__isset;
+ }
 -TxnAbortedException& TxnAbortedException::operator=(const TxnAbortedException& other812) {
 -  message = other812.message;
 -  __isset = other812.__isset;
++TxnAbortedException& TxnAbortedException::operator=(const TxnAbortedException& other834) {
 +  message = other834.message;
 +  __isset = other834.__isset;
- }
- TxnAbortedException& TxnAbortedException::operator=(const TxnAbortedException& other835) {
-   message = other835.message;
-   __isset = other835.__isset;
    return *this;
  }
  void TxnAbortedException::printTo(std::ostream& out) const {
@@@ -21143,13 -20250,13 +21187,13 @@@ void swap(TxnOpenException &a, TxnOpenE
    swap(a.__isset, b.__isset);
  }
  
- TxnOpenException::TxnOpenException(const TxnOpenException& other836) : TException() {
 -TxnOpenException::TxnOpenException(const TxnOpenException& other813) : TException() {
 -  message = other813.message;
 -  __isset = other813.__isset;
++TxnOpenException::TxnOpenException(const TxnOpenException& other835) : TException() {
++  message = other835.message;
++  __isset = other835.__isset;
+ }
 -TxnOpenException& TxnOpenException::operator=(const TxnOpenException& other814) {
 -  message = other814.message;
 -  __isset = other814.__isset;
++TxnOpenException& TxnOpenException::operator=(const TxnOpenException& other836) {
 +  message = other836.message;
 +  __isset = other836.__isset;
- }
- TxnOpenException& TxnOpenException::operator=(const TxnOpenException& other837) {
-   message = other837.message;
-   __isset = other837.__isset;
    return *this;
  }
  void TxnOpenException::printTo(std::ostream& out) const {
@@@ -21240,13 -20347,13 +21284,13 @@@ void swap(NoSuchLockException &a, NoSuc
    swap(a.__isset, b.__isset);
  }
  
- NoSuchLockException::NoSuchLockException(const NoSuchLockException& other838) : TException() {
 -NoSuchLockException::NoSuchLockException(const NoSuchLockException& other815) : TException() {
 -  message = other815.message;
 -  __isset = other815.__isset;
++NoSuchLockException::NoSuchLockException(const NoSuchLockException& other837) : TException() {
++  message = other837.message;
++  __isset = other837.__isset;
+ }
 -NoSuchLockException& NoSuchLockException::operator=(const NoSuchLockException& other816) {
 -  message = other816.message;
 -  __isset = other816.__isset;
++NoSuchLockException& NoSuchLockException::operator=(const NoSuchLockException& other838) {
 +  message = other838.message;
 +  __isset = other838.__isset;
- }
- NoSuchLockException& NoSuchLockException::operator=(const NoSuchLockException& other839) {
-   message = other839.message;
-   __isset = other839.__isset;
    return *this;
  }
  void NoSuchLockException::printTo(std::ostream& out) const {

http://git-wip-us.apache.org/repos/asf/hive/blob/1ceaf357/metastore/src/gen/thrift/gen-cpp/hive_metastore_types.h
----------------------------------------------------------------------


[33/50] [abbrv] hive git commit: HIVE-13583: E061-14: Search Conditions (Zoltan Haindrich, reviewed by Ashutosh Chauhan)

Posted by we...@apache.org.
HIVE-13583: E061-14: Search Conditions (Zoltan Haindrich, reviewed by Ashutosh Chauhan)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/54dbca69
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/54dbca69
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/54dbca69

Branch: refs/heads/hive-14535
Commit: 54dbca69c9ea630b9cccd5550bdb455b9bbc240c
Parents: 0f8840a
Author: Zoltan Haindrich <ki...@rxd.hu>
Authored: Mon May 8 07:27:01 2017 +0200
Committer: Zoltan Haindrich <ki...@rxd.hu>
Committed: Mon May 8 07:44:07 2017 +0200

----------------------------------------------------------------------
 .../hadoop/hive/ql/exec/FunctionRegistry.java   |   4 +
 .../translator/SqlFunctionConverter.java        |   4 +-
 .../apache/hadoop/hive/ql/parse/HiveParser.g    |   4 -
 .../hadoop/hive/ql/parse/IdentifiersParser.g    |  19 ++-
 .../hadoop/hive/ql/parse/SubQueryUtils.java     |   2 +-
 .../hive/ql/parse/TypeCheckProcFactory.java     |   9 --
 .../hive/ql/udf/generic/GenericUDFOPFalse.java  |  65 +++++++++
 .../ql/udf/generic/GenericUDFOPNotFalse.java    |  65 +++++++++
 .../ql/udf/generic/GenericUDFOPNotTrue.java     |  65 +++++++++
 .../hive/ql/udf/generic/GenericUDFOPTrue.java   |  65 +++++++++
 .../apache/hadoop/hive/ql/parse/TestIUD.java    |   8 +-
 .../hive/ql/parse/TestMergeStatement.java       |   4 +-
 ql/src/test/queries/clientpositive/udf_isops.q  |  34 +++++
 .../results/clientpositive/show_functions.q.out |   8 +
 .../test/results/clientpositive/udf_isops.q.out | 146 +++++++++++++++++++
 15 files changed, 472 insertions(+), 30 deletions(-)
----------------------------------------------------------------------
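
In short: the parser's nullCondition rule becomes a more general isCondition, so IS [NOT] TRUE and IS [NOT] FALSE are accepted alongside IS [NOT] NULL, and four matching UDFs (istrue, isnottrue, isfalse, isnotfalse) are registered. A minimal HiveQL sketch of the new predicates, assuming a hypothetical table t with a boolean column flag (the committed coverage lives in udf_isops.q):

  SELECT id FROM t WHERE flag IS TRUE;
  SELECT id FROM t WHERE flag IS NOT FALSE;
  -- the IS forms parse to the same function calls as:
  SELECT istrue(flag), isnotfalse(flag) FROM t;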


http://git-wip-us.apache.org/repos/asf/hive/blob/54dbca69/ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionRegistry.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionRegistry.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionRegistry.java
index 1b556ac..bf18a8d 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionRegistry.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionRegistry.java
@@ -359,6 +359,10 @@ public final class FunctionRegistry {
 
     system.registerGenericUDF("isnull", GenericUDFOPNull.class);
     system.registerGenericUDF("isnotnull", GenericUDFOPNotNull.class);
+    system.registerGenericUDF("istrue", GenericUDFOPTrue.class);
+    system.registerGenericUDF("isnottrue", GenericUDFOPNotTrue.class);
+    system.registerGenericUDF("isfalse", GenericUDFOPFalse.class);
+    system.registerGenericUDF("isnotfalse", GenericUDFOPNotFalse.class);
 
     system.registerGenericUDF("if", GenericUDFIf.class);
     system.registerGenericUDF("in", GenericUDFIn.class);
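
Because these are registered like any other GenericUDF, the boolean tests are also directly callable by name and show up in the function listing (hence the show_functions.q.out update in this commit). An illustrative sketch, again with placeholder names t and flag:

  SHOW FUNCTIONS;                  -- listing should now include isfalse, isnotfalse, isnottrue, istrue
  SELECT isnottrue(flag) FROM t;   -- same result as: flag IS NOT TRUE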

http://git-wip-us.apache.org/repos/asf/hive/blob/54dbca69/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/SqlFunctionConverter.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/SqlFunctionConverter.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/SqlFunctionConverter.java
index 10f5eb3..c6b34d4 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/SqlFunctionConverter.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/SqlFunctionConverter.java
@@ -350,8 +350,8 @@ public class SqlFunctionConverter {
       registerFunction("in", HiveIn.INSTANCE, hToken(HiveParser.Identifier, "in"));
       registerFunction("between", HiveBetween.INSTANCE, hToken(HiveParser.Identifier, "between"));
       registerFunction("struct", SqlStdOperatorTable.ROW, hToken(HiveParser.Identifier, "struct"));
-      registerFunction("isnotnull", SqlStdOperatorTable.IS_NOT_NULL, hToken(HiveParser.TOK_ISNOTNULL, "TOK_ISNOTNULL"));
-      registerFunction("isnull", SqlStdOperatorTable.IS_NULL, hToken(HiveParser.TOK_ISNULL, "TOK_ISNULL"));
+      registerFunction("isnotnull", SqlStdOperatorTable.IS_NOT_NULL, hToken(HiveParser.Identifier, "isnotnull"));
+      registerFunction("isnull", SqlStdOperatorTable.IS_NULL, hToken(HiveParser.Identifier, "isnull"));
       registerFunction("is not distinct from", SqlStdOperatorTable.IS_NOT_DISTINCT_FROM, hToken(HiveParser.EQUAL_NS, "<=>"));
       registerFunction("when", SqlStdOperatorTable.CASE, hToken(HiveParser.Identifier, "when"));
       registerDuplicateFunction("case", SqlStdOperatorTable.CASE, hToken(HiveParser.Identifier, "when"));

http://git-wip-us.apache.org/repos/asf/hive/blob/54dbca69/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g b/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g
index ca639d3..3136c93 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g
@@ -105,8 +105,6 @@ TOK_IMPORT;
 TOK_REPLICATION;
 TOK_METADATA;
 TOK_NULL;
-TOK_ISNULL;
-TOK_ISNOTNULL;
 TOK_PRIMARY_KEY;
 TOK_FOREIGN_KEY;
 TOK_VALIDATE;
@@ -397,8 +395,6 @@ TOK_OPERATOR;
 TOK_EXPRESSION;
 TOK_DETAIL;
 TOK_BLOCKING;
-TOK_LIKEANY;
-TOK_LIKEALL;
 }
 
 

http://git-wip-us.apache.org/repos/asf/hive/blob/54dbca69/ql/src/java/org/apache/hadoop/hive/ql/parse/IdentifiersParser.g
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/IdentifiersParser.g b/ql/src/java/org/apache/hadoop/hive/ql/parse/IdentifiersParser.g
index 645ced9..1c78c1a 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/IdentifiersParser.g
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/IdentifiersParser.g
@@ -435,10 +435,13 @@ precedenceUnaryOperator
     PLUS | MINUS | TILDE
     ;
 
-nullCondition
-    :
-    KW_NULL -> ^(TOK_ISNULL)
-    | KW_NOT KW_NULL -> ^(TOK_ISNOTNULL)
+isCondition
+    : KW_NULL -> Identifier["isnull"]
+    | KW_TRUE -> Identifier["istrue"]
+    | KW_FALSE -> Identifier["isfalse"]
+    | KW_NOT KW_NULL -> Identifier["isnotnull"]
+    | KW_NOT KW_TRUE -> Identifier["isnottrue"]
+    | KW_NOT KW_FALSE -> Identifier["isnotfalse"]
     ;
 
 precedenceUnaryPrefixExpression
@@ -447,8 +450,8 @@ precedenceUnaryPrefixExpression
     ;
 
 precedenceUnarySuffixExpression
-    : precedenceUnaryPrefixExpression (a=KW_IS nullCondition)?
-    -> {$a != null}? ^(TOK_FUNCTION nullCondition precedenceUnaryPrefixExpression)
+    : precedenceUnaryPrefixExpression (a=KW_IS isCondition)?
+    -> {$a != null}? ^(TOK_FUNCTION isCondition precedenceUnaryPrefixExpression)
     -> precedenceUnaryPrefixExpression
     ;
 
@@ -569,10 +572,10 @@ precedenceSimilarExpressionAtom[CommonTree t]
     -> ^(TOK_FUNCTION Identifier["between"] KW_FALSE {$t} $min $max)
     |
     KW_LIKE KW_ANY (expr=expressionsInParenthesis[false])
-    -> ^(TOK_FUNCTION TOK_LIKEANY {$t} {$expr.tree})
+    -> ^(TOK_FUNCTION Identifier["likeany"] {$t} {$expr.tree})
     |
     KW_LIKE KW_ALL (expr=expressionsInParenthesis[false])
-    -> ^(TOK_FUNCTION TOK_LIKEALL {$t} {$expr.tree})
+    -> ^(TOK_FUNCTION Identifier["likeall"] {$t} {$expr.tree})
     ;
 
 precedenceSimilarExpressionIn[CommonTree t]
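
The effect of this grammar change: instead of dedicated TOK_ISNULL/TOK_ISNOTNULL (and TOK_LIKEANY/TOK_LIKEALL) tokens, the parser now emits a TOK_FUNCTION node whose first child is a plain function identifier, so these predicates flow through the generic UDF resolution path. An illustrative sketch of the surface forms and the functions they resolve to (placeholder table and column names):

  SELECT * FROM t WHERE x IS NOT TRUE;                -- resolves to isnottrue(x)
  SELECT * FROM t WHERE name LIKE ANY ('a%', 'b%');   -- resolves to likeany(name, 'a%', 'b%')

This is also why the specialFunctionTextHashMap lookups are removed from TypeCheckProcFactory.java later in this diff.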

http://git-wip-us.apache.org/repos/asf/hive/blob/54dbca69/ql/src/java/org/apache/hadoop/hive/ql/parse/SubQueryUtils.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/SubQueryUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/SubQueryUtils.java
index 06cf56d..45db6f6 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/SubQueryUtils.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/SubQueryUtils.java
@@ -95,7 +95,7 @@ public class SubQueryUtils {
 
   static ASTNode isNull(ASTNode expr) {
     ASTNode node = (ASTNode) ParseDriver.adaptor.create(HiveParser.TOK_FUNCTION, "TOK_FUNCTION");
-    node.addChild((ASTNode) ParseDriver.adaptor.create(HiveParser.TOK_ISNULL, "TOK_ISNULL"));
+    node.addChild((ASTNode) ParseDriver.adaptor.create(HiveParser.Identifier, "isnull"));
     node.addChild(expr);
     return node;
   }

http://git-wip-us.apache.org/repos/asf/hive/blob/54dbca69/ql/src/java/org/apache/hadoop/hive/ql/parse/TypeCheckProcFactory.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/TypeCheckProcFactory.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/TypeCheckProcFactory.java
index c3227c9..82141be 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/TypeCheckProcFactory.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/TypeCheckProcFactory.java
@@ -198,7 +198,6 @@ public class TypeCheckProcFactory {
         + HiveParser.KW_WHEN + "%|" + HiveParser.KW_IN + "%|"
         + HiveParser.KW_ARRAY + "%|" + HiveParser.KW_MAP + "%|"
         + HiveParser.KW_STRUCT + "%|" + HiveParser.KW_EXISTS + "%|"
-        + HiveParser.TOK_LIKEALL + "%|" + HiveParser.TOK_LIKEANY + "%|"
         + HiveParser.TOK_SUBQUERY_OP_NOTIN + "%"),
         tf.getStrExprProcessor());
     opRules.put(new RuleRegExp("R4", HiveParser.KW_TRUE + "%|"
@@ -715,18 +714,12 @@ public class TypeCheckProcFactory {
   public static class DefaultExprProcessor implements NodeProcessor {
 
     static HashMap<Integer, String> specialUnaryOperatorTextHashMap;
-    static HashMap<Integer, String> specialFunctionTextHashMap;
     static HashMap<Integer, String> conversionFunctionTextHashMap;
     static HashSet<Integer> windowingTokens;
     static {
       specialUnaryOperatorTextHashMap = new HashMap<Integer, String>();
       specialUnaryOperatorTextHashMap.put(HiveParser.PLUS, "positive");
       specialUnaryOperatorTextHashMap.put(HiveParser.MINUS, "negative");
-      specialFunctionTextHashMap = new HashMap<Integer, String>();
-      specialFunctionTextHashMap.put(HiveParser.TOK_ISNULL, "isnull");
-      specialFunctionTextHashMap.put(HiveParser.TOK_ISNOTNULL, "isnotnull");
-      specialFunctionTextHashMap.put(HiveParser.TOK_LIKEANY, "likeany");
-      specialFunctionTextHashMap.put(HiveParser.TOK_LIKEALL, "likeall");
       conversionFunctionTextHashMap = new HashMap<Integer, String>();
       conversionFunctionTextHashMap.put(HiveParser.TOK_BOOLEAN,
           serdeConstants.BOOLEAN_TYPE_NAME);
@@ -818,7 +811,6 @@ public class TypeCheckProcFactory {
         // special dictionary.
         assert (expr.getChildCount() >= 1);
         int funcType = ((ASTNode) expr.getChild(0)).getType();
-        funcText = specialFunctionTextHashMap.get(funcType);
         if (funcText == null) {
           funcText = conversionFunctionTextHashMap.get(funcType);
         }
@@ -1305,7 +1297,6 @@ public class TypeCheckProcFactory {
 
       // Return nulls for conversion operators
       if (conversionFunctionTextHashMap.keySet().contains(expr.getType())
-          || specialFunctionTextHashMap.keySet().contains(expr.getType())
           || expr.getToken().getType() == HiveParser.CharSetName
           || expr.getToken().getType() == HiveParser.CharSetLiteral) {
         return null;

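With the parser now emitting plain function identifiers (see the SubQueryUtils change above), the IS NULL / IS NOT NULL forms resolve through the ordinary function lookup, so the special-case map in TypeCheckProcFactory can go away. A minimal HiveQL sketch of the equivalence, assuming a table t(v int) like the one created in udf_isops.q further down:

    -- Both spellings resolve to the same registered isnull / isnotnull UDFs;
    -- the operator form is simply parsed into (tok_function isnull ...) now.
    SELECT isnull(v), v IS NULL, isnotnull(v), v IS NOT NULL FROM t;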
http://git-wip-us.apache.org/repos/asf/hive/blob/54dbca69/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFOPFalse.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFOPFalse.java b/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFOPFalse.java
new file mode 100644
index 0000000..b74abd3
--- /dev/null
+++ b/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFOPFalse.java
@@ -0,0 +1,65 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.ql.udf.generic;
+
+import org.apache.hadoop.hive.ql.exec.Description;
+import org.apache.hadoop.hive.ql.exec.UDFArgumentException;
+import org.apache.hadoop.hive.ql.exec.UDFArgumentLengthException;
+import org.apache.hadoop.hive.ql.metadata.HiveException;
+import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
+import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorConverters;
+import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorConverters.Converter;
+import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory;
+import org.apache.hadoop.io.BooleanWritable;
+
+@Description(name = "isfalse", value = "_FUNC_ a - Returns true if a is FALSE and false otherwise")
+@NDV(maxNdv = 2)
+public class GenericUDFOPFalse extends GenericUDF {
+  private final BooleanWritable result = new BooleanWritable();
+  private Converter conditionConverter;
+
+  @Override
+  public ObjectInspector initialize(ObjectInspector[] arguments) throws UDFArgumentException {
+    if (arguments.length != 1) {
+      throw new UDFArgumentLengthException("Invalid number of arguments");
+    }
+    conditionConverter = ObjectInspectorConverters.getConverter(arguments[0],
+        PrimitiveObjectInspectorFactory.writableBooleanObjectInspector);
+
+    return PrimitiveObjectInspectorFactory.writableBooleanObjectInspector;
+  }
+
+  @Override
+  public Object evaluate(DeferredObject[] arguments) throws HiveException {
+    BooleanWritable condition = (BooleanWritable) conditionConverter.convert(arguments[0].get());
+    result.set(condition != null && !condition.get());
+    return result;
+  }
+
+  @Override
+  public String getDisplayString(String[] children) {
+    assert (children.length == 1);
+    return children[0] + " is false";
+  }
+
+  @Override
+  public GenericUDF negative() {
+    return new GenericUDFOPNotFalse();
+  }
+}

http://git-wip-us.apache.org/repos/asf/hive/blob/54dbca69/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFOPNotFalse.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFOPNotFalse.java b/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFOPNotFalse.java
new file mode 100644
index 0000000..229ed4d
--- /dev/null
+++ b/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFOPNotFalse.java
@@ -0,0 +1,65 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.ql.udf.generic;
+
+import org.apache.hadoop.hive.ql.exec.Description;
+import org.apache.hadoop.hive.ql.exec.UDFArgumentException;
+import org.apache.hadoop.hive.ql.exec.UDFArgumentLengthException;
+import org.apache.hadoop.hive.ql.metadata.HiveException;
+import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
+import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorConverters;
+import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorConverters.Converter;
+import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory;
+import org.apache.hadoop.io.BooleanWritable;
+
+@Description(name = "isnotfalse", value = "_FUNC_ a - Returns true if a is NOT FALSE and false otherwise")
+@NDV(maxNdv = 2)
+public class GenericUDFOPNotFalse extends GenericUDF {
+  private final BooleanWritable result = new BooleanWritable();
+  private Converter conditionConverter;
+
+  @Override
+  public ObjectInspector initialize(ObjectInspector[] arguments) throws UDFArgumentException {
+    if (arguments.length != 1) {
+      throw new UDFArgumentLengthException("Invalid number of arguments");
+    }
+    conditionConverter = ObjectInspectorConverters.getConverter(arguments[0],
+        PrimitiveObjectInspectorFactory.writableBooleanObjectInspector);
+
+    return PrimitiveObjectInspectorFactory.writableBooleanObjectInspector;
+  }
+
+  @Override
+  public Object evaluate(DeferredObject[] arguments) throws HiveException {
+    BooleanWritable condition = (BooleanWritable) conditionConverter.convert(arguments[0].get());
+    result.set(condition == null || condition.get());
+    return result;
+  }
+
+  @Override
+  public String getDisplayString(String[] children) {
+    assert (children.length == 1);
+    return children[0] + " is not false";
+  }
+
+  @Override
+  public GenericUDF negative() {
+    return new GenericUDFOPFalse();
+  }
+}

http://git-wip-us.apache.org/repos/asf/hive/blob/54dbca69/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFOPNotTrue.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFOPNotTrue.java b/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFOPNotTrue.java
new file mode 100644
index 0000000..bf639af
--- /dev/null
+++ b/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFOPNotTrue.java
@@ -0,0 +1,65 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.ql.udf.generic;
+
+import org.apache.hadoop.hive.ql.exec.Description;
+import org.apache.hadoop.hive.ql.exec.UDFArgumentException;
+import org.apache.hadoop.hive.ql.exec.UDFArgumentLengthException;
+import org.apache.hadoop.hive.ql.metadata.HiveException;
+import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
+import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorConverters;
+import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorConverters.Converter;
+import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory;
+import org.apache.hadoop.io.BooleanWritable;
+
+@Description(name = "isnottrue", value = "_FUNC_ a - Returns true if a is NOT TRUE and false otherwise")
+@NDV(maxNdv = 2)
+public class GenericUDFOPNotTrue extends GenericUDF {
+  private final BooleanWritable result = new BooleanWritable();
+  private Converter conditionConverter;
+
+  @Override
+  public ObjectInspector initialize(ObjectInspector[] arguments) throws UDFArgumentException {
+    if (arguments.length != 1) {
+      throw new UDFArgumentLengthException("Invalid number of arguments");
+    }
+    conditionConverter = ObjectInspectorConverters.getConverter(arguments[0],
+        PrimitiveObjectInspectorFactory.writableBooleanObjectInspector);
+
+    return PrimitiveObjectInspectorFactory.writableBooleanObjectInspector;
+  }
+
+  @Override
+  public Object evaluate(DeferredObject[] arguments) throws HiveException {
+    BooleanWritable condition = (BooleanWritable) conditionConverter.convert(arguments[0].get());
+    result.set(condition == null || !condition.get());
+    return result;
+  }
+
+  @Override
+  public String getDisplayString(String[] children) {
+    assert (children.length == 1);
+    return children[0] + " is not true";
+  }
+
+  @Override
+  public GenericUDF negative() {
+    return new GenericUDFOPTrue();
+  }
+}

http://git-wip-us.apache.org/repos/asf/hive/blob/54dbca69/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFOPTrue.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFOPTrue.java b/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFOPTrue.java
new file mode 100644
index 0000000..764f705
--- /dev/null
+++ b/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFOPTrue.java
@@ -0,0 +1,65 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.ql.udf.generic;
+
+import org.apache.hadoop.hive.ql.exec.Description;
+import org.apache.hadoop.hive.ql.exec.UDFArgumentException;
+import org.apache.hadoop.hive.ql.exec.UDFArgumentLengthException;
+import org.apache.hadoop.hive.ql.metadata.HiveException;
+import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
+import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorConverters;
+import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorConverters.Converter;
+import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory;
+import org.apache.hadoop.io.BooleanWritable;
+
+@Description(name = "istrue", value = "_FUNC_ a - Returns true if a is TRUE and false otherwise")
+@NDV(maxNdv = 2)
+public class GenericUDFOPTrue extends GenericUDF {
+  private final BooleanWritable result = new BooleanWritable();
+  private Converter conditionConverter;
+
+  @Override
+  public ObjectInspector initialize(ObjectInspector[] arguments) throws UDFArgumentException {
+    if (arguments.length != 1) {
+      throw new UDFArgumentLengthException("Invalid number of arguments");
+    }
+    conditionConverter = ObjectInspectorConverters.getConverter(arguments[0],
+        PrimitiveObjectInspectorFactory.writableBooleanObjectInspector);
+
+    return PrimitiveObjectInspectorFactory.writableBooleanObjectInspector;
+  }
+
+  @Override
+  public Object evaluate(DeferredObject[] arguments) throws HiveException {
+    BooleanWritable condition = (BooleanWritable) conditionConverter.convert(arguments[0].get());
+    result.set(condition != null && condition.get());
+    return result;
+  }
+
+  @Override
+  public String getDisplayString(String[] children) {
+    assert (children.length == 1);
+    return children[0] + " is true";
+  }
+
+  @Override
+  public GenericUDF negative() {
+    return new GenericUDFOPNotTrue();
+  }
+}

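The four UDFs above share the same shape: one boolean-convertible argument, a BooleanWritable result, and NULL folded to false for IS TRUE / IS FALSE and to true for the negated forms. A minimal HiveQL sketch of how the operators line up with the registered names, assuming the functions listed in show_functions.q.out below are callable directly:

    -- Per the @Description annotations and display strings:
    --   b IS TRUE       <=> istrue(b)
    --   b IS NOT TRUE   <=> isnottrue(b)
    --   b IS FALSE      <=> isfalse(b)
    --   b IS NOT FALSE  <=> isnotfalse(b)
    SELECT istrue(b), b IS TRUE, isnotfalse(b), b IS NOT FALSE FROM t;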
http://git-wip-us.apache.org/repos/asf/hive/blob/54dbca69/ql/src/test/org/apache/hadoop/hive/ql/parse/TestIUD.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/parse/TestIUD.java b/ql/src/test/org/apache/hadoop/hive/ql/parse/TestIUD.java
index 4ed765d..5f8ed93 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/parse/TestIUD.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/parse/TestIUD.java
@@ -73,7 +73,7 @@ public class TestIUD {
         "(tok_tabname src) " +
         "(tok_where " +
           "(and " +
-            "(tok_function tok_isnotnull (tok_table_or_col key)) " +
+            "(tok_function isnotnull (tok_table_or_col key)) " +
             "(< (. (tok_table_or_col src) value) 0))))",
       ast.toStringTree());
   }
@@ -110,7 +110,7 @@ public class TestIUD {
         "(tok_set_columns_clause " +
           "(= " +
             "(tok_table_or_col key) 3)) " +
-        "(tok_where (tok_function tok_isnull (tok_table_or_col value))))",
+        "(tok_where (tok_function isnull (tok_table_or_col value))))",
       ast.toStringTree());
   }
   @Test
@@ -122,7 +122,7 @@ public class TestIUD {
         "(= (tok_table_or_col key) (+ (- 3) (% (* 5 9) 8))) " +
         "(= (tok_table_or_col val) (tok_function tok_int (+ 6.1 (tok_table_or_col c)))) " +
         "(= (tok_table_or_col d) (- (tok_table_or_col d) 1))) " +
-        "(tok_where (tok_function tok_isnull (tok_table_or_col value))))",
+        "(tok_where (tok_function isnull (tok_table_or_col value))))",
       ast.toStringTree());
   }
   @Test
@@ -150,7 +150,7 @@ public class TestIUD {
         "(tok_select " +
           "(tok_selexpr (. (tok_table_or_col pvs) viewtime)) " +
           "(tok_selexpr (. (tok_table_or_col pvs) userid))) " +
-        "(tok_where (tok_function tok_isnull (. (tok_table_or_col pvs) userid)))))",
+        "(tok_where (tok_function isnull (. (tok_table_or_col pvs) userid)))))",
       ast.toStringTree());
   }
   @Test

http://git-wip-us.apache.org/repos/asf/hive/blob/54dbca69/ql/src/test/org/apache/hadoop/hive/ql/parse/TestMergeStatement.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/parse/TestMergeStatement.java b/ql/src/test/org/apache/hadoop/hive/ql/parse/TestMergeStatement.java
index 8e0ac9d..5bdffb5 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/parse/TestMergeStatement.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/parse/TestMergeStatement.java
@@ -87,7 +87,7 @@ public class TestMergeStatement {
             "(tok_update " +
               "(tok_set_columns_clause " +
                 "(= (tok_table_or_col a) (. (tok_table_or_col source) b)) " +
-                "(= (tok_table_or_col b) (tok_function when (tok_function tok_isnull (tok_table_or_col c1)) (tok_table_or_col c1) (tok_table_or_col c1)))" +
+                "(= (tok_table_or_col b) (tok_function when (tok_function isnull (tok_table_or_col c1)) (tok_table_or_col c1) (tok_table_or_col c1)))" +
               ")" +
             ") " +
           "(< (. (tok_table_or_col source) c2) (tok_function current_time)))" +
@@ -129,7 +129,7 @@ public class TestMergeStatement {
             "(tok_value_row " +
               "(. (tok_table_or_col source) a) " +
                 "(tok_function when " +
-                  "(tok_function tok_isnull (. (tok_table_or_col source) b)) (. (tok_table_or_col target) b) " +
+                  "(tok_function isnull (. (tok_table_or_col source) b)) (. (tok_table_or_col target) b) " +
                   "(. (tok_table_or_col source) b)" +
                 ")" +
               ")" +

http://git-wip-us.apache.org/repos/asf/hive/blob/54dbca69/ql/src/test/queries/clientpositive/udf_isops.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/udf_isops.q b/ql/src/test/queries/clientpositive/udf_isops.q
new file mode 100644
index 0000000..414209e
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/udf_isops.q
@@ -0,0 +1,34 @@
+drop table if exists t;
+create table t (a int,v int, b boolean);
+insert into t values (1,null, true);
+insert into t values (2,1,    false);
+insert into t values (3,2,    null);
+
+select assert_true(sum(a*a) = 1) from t
+	where v is null;
+select assert_true(sum(a*a) = 2*2+3*3) from t
+	where v is not null;
+
+select assert_true(sum(a*a) = 1) from t
+	where b is true;
+
+select assert_true(sum(a*a) = 2*2 + 3*3) from t
+	where b is not true;
+
+select assert_true(sum(a*a) = 4) from t
+	where b is false;
+
+select assert_true(sum(a*a) = 1*1 + 3*3) from t
+	where b is not false;
+
+select assert_true(sum(a*a) = 2*2) from t
+	where (v>0 and v<2) is true;
+
+select assert_true(sum(a*a) = 2*2) from t
+	where (v<2) is true;
+
+select  NULL is true,
+        NULL is not true,
+        NULL is false,
+        NULL is not false
+from t;

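The last query above exercises the key semantic point: under three-valued logic the IS [NOT] TRUE/FALSE operators always return a definite boolean, whereas a plain comparison propagates NULL. A small sketch of the distinction, assuming the same table t populated above:

    -- NULL IS TRUE evaluates to false, while NULL = true evaluates to NULL,
    -- so the filter below keeps only the row whose b is definitely true.
    SELECT a FROM t WHERE b IS TRUE;       -- keeps only a = 1
    SELECT b, b = true, b IS TRUE FROM t;  -- NULL row yields: NULL, NULL, false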
http://git-wip-us.apache.org/repos/asf/hive/blob/54dbca69/ql/src/test/results/clientpositive/show_functions.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/show_functions.q.out b/ql/src/test/results/clientpositive/show_functions.q.out
index e638638..31653a6 100644
--- a/ql/src/test/results/clientpositive/show_functions.q.out
+++ b/ql/src/test/results/clientpositive/show_functions.q.out
@@ -121,8 +121,12 @@ initcap
 inline
 instr
 internal_interval
+isfalse
+isnotfalse
 isnotnull
+isnottrue
 isnull
+istrue
 java_method
 json_tuple
 lag
@@ -327,6 +331,10 @@ floor_minute
 from_unixtime
 in_file
 inline
+isfalse
+isnotfalse
+isnottrue
+istrue
 json_tuple
 last_value
 lcase

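Since the new functions are registered, DESCRIBE FUNCTION should surface their @Description text as well; a quick sketch of the expected shape for one of them (the exact wording comes from the annotation, with _FUNC_ substituted):

    DESCRIBE FUNCTION istrue;
    -- expected: istrue a - Returns true if a is TRUE and false otherwise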
http://git-wip-us.apache.org/repos/asf/hive/blob/54dbca69/ql/src/test/results/clientpositive/udf_isops.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/udf_isops.q.out b/ql/src/test/results/clientpositive/udf_isops.q.out
new file mode 100644
index 0000000..0b6b35e
--- /dev/null
+++ b/ql/src/test/results/clientpositive/udf_isops.q.out
@@ -0,0 +1,146 @@
+PREHOOK: query: drop table if exists t
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: drop table if exists t
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: create table t (a int,v int, b boolean)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@t
+POSTHOOK: query: create table t (a int,v int, b boolean)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@t
+PREHOOK: query: insert into t values (1,null, true)
+PREHOOK: type: QUERY
+PREHOOK: Output: default@t
+POSTHOOK: query: insert into t values (1,null, true)
+POSTHOOK: type: QUERY
+POSTHOOK: Output: default@t
+POSTHOOK: Lineage: t.a EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
+POSTHOOK: Lineage: t.b EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col3, type:string, comment:), ]
+POSTHOOK: Lineage: t.v EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col2, type:string, comment:), ]
+PREHOOK: query: insert into t values (2,1,    false)
+PREHOOK: type: QUERY
+PREHOOK: Output: default@t
+POSTHOOK: query: insert into t values (2,1,    false)
+POSTHOOK: type: QUERY
+POSTHOOK: Output: default@t
+POSTHOOK: Lineage: t.a EXPRESSION [(values__tmp__table__2)values__tmp__table__2.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
+POSTHOOK: Lineage: t.b EXPRESSION [(values__tmp__table__2)values__tmp__table__2.FieldSchema(name:tmp_values_col3, type:string, comment:), ]
+POSTHOOK: Lineage: t.v EXPRESSION [(values__tmp__table__2)values__tmp__table__2.FieldSchema(name:tmp_values_col2, type:string, comment:), ]
+PREHOOK: query: insert into t values (3,2,    null)
+PREHOOK: type: QUERY
+PREHOOK: Output: default@t
+POSTHOOK: query: insert into t values (3,2,    null)
+POSTHOOK: type: QUERY
+POSTHOOK: Output: default@t
+POSTHOOK: Lineage: t.a EXPRESSION [(values__tmp__table__3)values__tmp__table__3.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
+POSTHOOK: Lineage: t.b EXPRESSION [(values__tmp__table__3)values__tmp__table__3.FieldSchema(name:tmp_values_col3, type:string, comment:), ]
+POSTHOOK: Lineage: t.v EXPRESSION [(values__tmp__table__3)values__tmp__table__3.FieldSchema(name:tmp_values_col2, type:string, comment:), ]
+PREHOOK: query: select assert_true(sum(a*a) = 1) from t
+	where v is null
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t
+#### A masked pattern was here ####
+POSTHOOK: query: select assert_true(sum(a*a) = 1) from t
+	where v is null
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t
+#### A masked pattern was here ####
+NULL
+PREHOOK: query: select assert_true(sum(a*a) = 2*2+3*3) from t
+	where v is not null
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t
+#### A masked pattern was here ####
+POSTHOOK: query: select assert_true(sum(a*a) = 2*2+3*3) from t
+	where v is not null
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t
+#### A masked pattern was here ####
+NULL
+PREHOOK: query: select assert_true(sum(a*a) = 1) from t
+	where b is true
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t
+#### A masked pattern was here ####
+POSTHOOK: query: select assert_true(sum(a*a) = 1) from t
+	where b is true
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t
+#### A masked pattern was here ####
+NULL
+PREHOOK: query: select assert_true(sum(a*a) = 2*2 + 3*3) from t
+	where b is not true
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t
+#### A masked pattern was here ####
+POSTHOOK: query: select assert_true(sum(a*a) = 2*2 + 3*3) from t
+	where b is not true
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t
+#### A masked pattern was here ####
+NULL
+PREHOOK: query: select assert_true(sum(a*a) = 4) from t
+	where b is false
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t
+#### A masked pattern was here ####
+POSTHOOK: query: select assert_true(sum(a*a) = 4) from t
+	where b is false
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t
+#### A masked pattern was here ####
+NULL
+PREHOOK: query: select assert_true(sum(a*a) = 1*1 + 3*3) from t
+	where b is not false
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t
+#### A masked pattern was here ####
+POSTHOOK: query: select assert_true(sum(a*a) = 1*1 + 3*3) from t
+	where b is not false
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t
+#### A masked pattern was here ####
+NULL
+PREHOOK: query: select assert_true(sum(a*a) = 2*2) from t
+	where (v>0 and v<2) is true
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t
+#### A masked pattern was here ####
+POSTHOOK: query: select assert_true(sum(a*a) = 2*2) from t
+	where (v>0 and v<2) is true
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t
+#### A masked pattern was here ####
+NULL
+PREHOOK: query: select assert_true(sum(a*a) = 2*2) from t
+	where (v<2) is true
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t
+#### A masked pattern was here ####
+POSTHOOK: query: select assert_true(sum(a*a) = 2*2) from t
+	where (v<2) is true
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t
+#### A masked pattern was here ####
+NULL
+PREHOOK: query: select  NULL is true,
+        NULL is not true,
+        NULL is false,
+        NULL is not false
+from t
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t
+#### A masked pattern was here ####
+POSTHOOK: query: select  NULL is true,
+        NULL is not true,
+        NULL is false,
+        NULL is not false
+from t
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t
+#### A masked pattern was here ####
+false	true	false	true
+false	true	false	true
+false	true	false	true